xref: /openbmc/linux/net/bluetooth/mgmt.c (revision 8e2a46a4)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 
/* Protocol version/revision reported via MGMT_OP_READ_VERSION */
#define MGMT_VERSION	1
#define MGMT_REVISION	17
42 
/* Opcodes available to trusted management sockets; the full list is
 * reported to user space via MGMT_OP_READ_COMMANDS.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
};
115 
/* Events delivered to trusted management sockets; also reported to
 * user space via MGMT_OP_READ_COMMANDS.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
155 
/* Read-only subset of opcodes permitted for untrusted (non-privileged)
 * management sockets.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
};
166 
/* Subset of events delivered to untrusted management sockets; excludes
 * anything carrying keys or per-connection data.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
181 
/* How long the service cache stays valid before service_cache_off runs */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* All-zero 128-bit key, used to detect unset/invalid key material */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
186 
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code, so entry order must match the
 * HCI error code numbering exactly; do not reorder or insert entries.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
251 
252 static u8 mgmt_status(u8 hci_status)
253 {
254 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
255 		return mgmt_status_table[hci_status];
256 
257 	return MGMT_STATUS_FAILED;
258 }
259 
/* Broadcast an index-related event on the control channel to every
 * socket matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
266 
/* Broadcast an event on the control channel to sockets matching @flag,
 * excluding @skip_sk (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
273 
/* Broadcast an event on the control channel to trusted sockets only,
 * excluding @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
280 
281 static u8 le_addr_type(u8 mgmt_addr_type)
282 {
283 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
284 		return ADDR_LE_DEV_PUBLIC;
285 	else
286 		return ADDR_LE_DEV_RANDOM;
287 }
288 
/* Fill a mgmt_rp_read_version structure with the interface version and
 * revision (revision in little-endian wire format).
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
296 
/* MGMT_OP_READ_VERSION handler: reply with the management interface
 * version and revision.  Works without a controller (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
309 
310 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
311 			 u16 data_len)
312 {
313 	struct mgmt_rp_read_commands *rp;
314 	u16 num_commands, num_events;
315 	size_t rp_size;
316 	int i, err;
317 
318 	bt_dev_dbg(hdev, "sock %p", sk);
319 
320 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
321 		num_commands = ARRAY_SIZE(mgmt_commands);
322 		num_events = ARRAY_SIZE(mgmt_events);
323 	} else {
324 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
325 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
326 	}
327 
328 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
329 
330 	rp = kmalloc(rp_size, GFP_KERNEL);
331 	if (!rp)
332 		return -ENOMEM;
333 
334 	rp->num_commands = cpu_to_le16(num_commands);
335 	rp->num_events = cpu_to_le16(num_events);
336 
337 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
338 		__le16 *opcode = rp->opcodes;
339 
340 		for (i = 0; i < num_commands; i++, opcode++)
341 			put_unaligned_le16(mgmt_commands[i], opcode);
342 
343 		for (i = 0; i < num_events; i++, opcode++)
344 			put_unaligned_le16(mgmt_events[i], opcode);
345 	} else {
346 		__le16 *opcode = rp->opcodes;
347 
348 		for (i = 0; i < num_commands; i++, opcode++)
349 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
350 
351 		for (i = 0; i < num_events; i++, opcode++)
352 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
353 	}
354 
355 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
356 				rp, rp_size);
357 	kfree(rp);
358 
359 	return err;
360 }
361 
/* MGMT_OP_READ_INDEX_LIST handler: reply with the indexes of all
 * configured primary controllers.
 *
 * The device list is walked twice while holding hci_dev_list_lock:
 * once to size the reply allocation and once to fill it in.  Keeping
 * the read lock across both passes keeps the two counts consistent.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count configured primary controllers for sizing */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: hci_dev_list_lock is held, so we must not sleep */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, additionally skipping
	 * controllers still in setup/config or bound to a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length: the second pass may have skipped
	 * entries that the sizing pass counted.
	 */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
421 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list(), but
 * reports only primary controllers still flagged HCI_UNCONFIGURED.
 * Same two-pass pattern under hci_dev_list_lock.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count unconfigured primary controllers for sizing */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: hci_dev_list_lock is held, so we must not sleep */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, additionally skipping
	 * controllers still in setup/config or bound to a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length after the filtered second pass */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
481 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: report all controllers (primary
 * and AMP) with a per-entry type (0x00 configured primary, 0x01
 * unconfigured primary, 0x02 AMP) and bus information.
 *
 * Uses the same two-pass count/fill pattern under hci_dev_list_lock as
 * read_index_list().  Calling this command also switches the socket
 * over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count all primary and AMP controllers for sizing */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: hci_dev_list_lock is held, so we must not sleep */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in entries, skipping controllers still in
	 * setup/config or bound to a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
555 
556 static bool is_configured(struct hci_dev *hdev)
557 {
558 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
559 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
560 		return false;
561 
562 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
563 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
564 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
565 		return false;
566 
567 	return true;
568 }
569 
/* Build the bitmask of configuration options that are still missing on
 * @hdev (wire format, little-endian).  The conditions mirror
 * is_configured(); keep both in sync.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	/* External configuration demanded by quirk but not yet done */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A valid public address is required but still all-zero */
	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
585 
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-options
 * mask to sockets subscribed to option events, excluding @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
593 
/* Complete a configuration command by replying with the current
 * missing-options mask for @hdev.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
601 
/* MGMT_OP_READ_CONFIG_INFO handler: report the manufacturer, which
 * configuration options the controller supports, and which of those
 * are still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Changing the public address is only supported when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
629 
/* Build the bitmask of PHYs the controller supports, derived from the
 * LMP feature bits (BR/EDR) and LE feature bits.  EDR 2M/3M slot
 * variants are only reported when the corresponding EDR rate and slot
 * capabilities are both present.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate, single slot is mandatory for BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			/* 3M rates imply 2M capability, hence the nesting */
			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE controller */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
681 
/* Build the bitmask of PHYs currently selected on the controller.
 *
 * For BR packets a set hdev->pkt_type bit enables the packet type,
 * while for the EDR variants the sense is inverted: a set HCI_2DHx /
 * HCI_3DHx bit excludes that packet type, hence the negated tests
 * below.  LE selection comes from the default TX/RX PHY preferences.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate, single slot is always selected */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits are exclusion bits: clear means selected */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
744 
745 static u32 get_configurable_phys(struct hci_dev *hdev)
746 {
747 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
748 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
749 }
750 
/* Build the bitmask of settings this controller can support at all,
 * based on its BR/EDR and LE capabilities and quirks.  Reported in the
 * READ_INFO / READ_EXT_INFO replies.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of controller capabilities */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable (interlaced page scan) needs 1.2+ */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	/* Configuration is possible with an external-config quirk or a
	 * driver-provided set_bdaddr callback.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
796 
/* Build the bitmask of settings currently active on the controller,
 * derived from the hdev flags.  Reported in READ_INFO replies and
 * NEW_SETTINGS events.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
867 
/* Look up a pending mgmt command by opcode on the control channel */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
872 
/* Look up a pending mgmt command by opcode and user data pointer on the
 * control channel.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
879 
880 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
881 {
882 	struct mgmt_pending_cmd *cmd;
883 
884 	/* If there's a pending mgmt command the flags will not yet have
885 	 * their final values, so check for this first.
886 	 */
887 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
888 	if (cmd) {
889 		struct mgmt_mode *cp = cmd->param;
890 		if (cp->val == 0x01)
891 			return LE_AD_GENERAL;
892 		else if (cp->val == 0x02)
893 			return LE_AD_LIMITED;
894 	} else {
895 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
896 			return LE_AD_LIMITED;
897 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
898 			return LE_AD_GENERAL;
899 	}
900 
901 	return 0;
902 }
903 
904 bool mgmt_get_connectable(struct hci_dev *hdev)
905 {
906 	struct mgmt_pending_cmd *cmd;
907 
908 	/* If there's a pending mgmt command the flag will not yet have
909 	 * it's final value, so check for this first.
910 	 */
911 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
912 	if (cmd) {
913 		struct mgmt_mode *cp = cmd->param;
914 
915 		return cp->val;
916 	}
917 
918 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
919 }
920 
/* Delayed work run when the service cache expires (CACHE_TIMEOUT):
 * pushes the accumulated EIR and class-of-device updates to the
 * controller in a single HCI request.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* test-and-clear so concurrent expiry only runs this once */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	/* Run the request outside the device lock */
	hci_req_run(&req, NULL);
}
941 
/* Delayed work run when the Resolvable Private Address times out: mark
 * the RPA expired and, if advertising is active, restart it so a fresh
 * RPA gets generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to refresh unless we are currently advertising */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
966 
/* One-time per-device mgmt initialization, performed the first time a
 * management socket touches @hdev.  The HCI_MGMT flag guards against
 * repeating this on later commands.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* test-and-set: bail out if already initialized */
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
982 
/* MGMT_OP_READ_INFO handler: report the controller's address, version,
 * manufacturer, supported/current settings, class of device, and local
 * names.  All fields are snapshotted under the device lock.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1012 
/* Append EIR structures describing @hdev (class of device, appearance,
 * complete and short name) to @eir and return the total number of
 * bytes written. The caller must supply a buffer large enough for the
 * worst case (see read_ext_controller_info/ext_info_changed).
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	/* Class of Device is only meaningful while BR/EDR is enabled */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	/* Appearance is an LE concept, so gate it on LE being enabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1036 
/* Handler for MGMT_OP_READ_EXT_INFO: like READ_INFO, but class of
 * device, appearance and names are delivered as variable-length EIR
 * data appended after the fixed reply header.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* NOTE(review): 512 bytes is assumed to cover the fixed reply
	 * plus the worst-case EIR payload built by
	 * append_eir_data_to_buf() — confirm against the name field
	 * sizes if those ever change.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1076 
/* Broadcast MGMT_EV_EXT_INFO_CHANGED (with freshly built EIR data) to
 * all sockets that opted into extended info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	/* Same worst-case sizing assumption as read_ext_controller_info */
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1092 
/* Complete a pending mgmt command @opcode on @sk with the controller's
 * current settings bitmask as the reply payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1100 
1101 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1102 {
1103 	bt_dev_dbg(hdev, "status 0x%02x", status);
1104 
1105 	if (hci_conn_count(hdev) == 0) {
1106 		cancel_delayed_work(&hdev->power_off);
1107 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
1108 	}
1109 }
1110 
1111 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1112 {
1113 	struct mgmt_ev_advertising_added ev;
1114 
1115 	ev.instance = instance;
1116 
1117 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1118 }
1119 
1120 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1121 			      u8 instance)
1122 {
1123 	struct mgmt_ev_advertising_removed ev;
1124 
1125 	ev.instance = instance;
1126 
1127 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1128 }
1129 
1130 static void cancel_adv_timeout(struct hci_dev *hdev)
1131 {
1132 	if (hdev->adv_instance_timeout) {
1133 		hdev->adv_instance_timeout = 0;
1134 		cancel_delayed_work(&hdev->adv_instance_expire);
1135 	}
1136 }
1137 
/* Build and run a single HCI request that prepares the controller for
 * power off: disable page/inquiry scan, stop advertising, stop any
 * ongoing discovery and abort every connection. Completion is handled
 * by clean_up_hci_complete(). Returns the hci_req_run() result
 * (-ENODATA if nothing needed to be sent).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	/* Only flag discovery as stopping once the request was queued */
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1171 
/* Handler for MGMT_OP_SET_POWERED: power the controller up (via the
 * power_on work) or down (after cleaning up HCI state). The pending
 * command is completed asynchronously from the power change path.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* No state change requested: answer with current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1226 
/* Broadcast MGMT_EV_NEW_SETTINGS (current settings bitmask) to all
 * sockets with settings events enabled, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1234 
/* Exported wrapper: broadcast the settings change to every listener */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1239 
/* Context for mgmt_pending_foreach() callbacks. @sk records the first
 * responded socket (with a held reference, see settings_rsp()) so the
 * caller can skip it when broadcasting and sock_put() it afterwards.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1245 
/* mgmt_pending_foreach() callback: answer a pending settings command
 * with the current settings and free it. The first socket seen is
 * stashed (with a held reference) in match->sk for the caller.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	/* _free, not _remove: the entry was already unlinked above */
	mgmt_pending_free(cmd);
}
1261 
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status passed via @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1269 
1270 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1271 {
1272 	if (cmd->cmd_complete) {
1273 		u8 *status = data;
1274 
1275 		cmd->cmd_complete(cmd, *status);
1276 		mgmt_pending_remove(cmd);
1277 
1278 		return;
1279 	}
1280 
1281 	cmd_status_rsp(cmd, data);
1282 }
1283 
/* Complete a pending command by echoing back its original parameters */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1289 
/* Complete a pending command returning only the leading
 * mgmt_addr_info portion of its stored parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1295 
1296 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1297 {
1298 	if (!lmp_bredr_capable(hdev))
1299 		return MGMT_STATUS_NOT_SUPPORTED;
1300 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1301 		return MGMT_STATUS_REJECTED;
1302 	else
1303 		return MGMT_STATUS_SUCCESS;
1304 }
1305 
1306 static u8 mgmt_le_support(struct hci_dev *hdev)
1307 {
1308 	if (!lmp_le_capable(hdev))
1309 		return MGMT_STATUS_NOT_SUPPORTED;
1310 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1311 		return MGMT_STATUS_REJECTED;
1312 	else
1313 		return MGMT_STATUS_SUCCESS;
1314 }
1315 
/* Completion handler for the SET_DISCOVERABLE HCI transaction:
 * responds to the pending command and, on success, arms the
 * discoverable timeout and broadcasts the new settings.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Undo the flag set optimistically in set_discoverable() */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the timeout stored earlier by set_discoverable() */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1350 
/* Handler for MGMT_OP_SET_DISCOVERABLE. cp->val: 0x00 = off (timeout
 * must be zero), 0x01 = general discoverable, 0x02 = limited
 * discoverable (timeout required). The actual HCI update is performed
 * by the discoverable_update work; completion is handled in
 * mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be honoured while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1480 
/* Completion handler for the SET_CONNECTABLE HCI transaction:
 * responds to the pending command and broadcasts the new settings on
 * success.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1508 
/* Flag-only connectable update, used by set_connectable() when the
 * controller is powered off. Disabling connectable also clears
 * discoverable. Sends the settings response first, then (if anything
 * changed) refreshes scanning and broadcasts new settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1537 
/* Handler for MGMT_OP_SET_CONNECTABLE. When powered off only the
 * flags are updated (set_connectable_update_settings()); otherwise the
 * connectable_update work performs the HCI changes and
 * mgmt_set_connectable_complete() finishes the pending command.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverable mode */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1594 
/* Handler for MGMT_OP_SET_BONDABLE: toggles the HCI_BONDABLE flag and,
 * when in limited privacy mode with advertising active, schedules an
 * advertising refresh since the local address may change.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1637 
/* Handler for MGMT_OP_SET_LINK_SECURITY: toggles BR/EDR link-level
 * security. While powered off only the flag is changed; otherwise a
 * WRITE_AUTH_ENABLE command is sent and the pending command completes
 * from the HCI event path.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1706 
/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing). Disabling SSP
 * also disables High Speed since HS depends on SSP. While powered off
 * only flags change; otherwise WRITE_SSP_MODE is sent and the pending
 * command completes from the HCI event path.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also clears HS; "changed" must
			 * reflect whether either flag actually flipped.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off debug key mode, if in use */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1787 
/* Handler for MGMT_OP_SET_HS (High Speed). HS requires SSP to be
 * enabled and is a host-side flag only — no HCI commands are sent.
 * Disabling HS is rejected while powered on.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending SET_SSP could still flip the SSP precondition */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1844 
/* Request callback for the SET_LE HCI transaction: respond to all
 * pending SET_LE commands (or fail them on error), broadcast the new
 * settings, and refresh default advertising data when LE ended up
 * enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	/* match.sk (set by settings_rsp) already got its response, so
	 * skip it for the broadcast and drop the held reference.
	 */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1891 
/* Handler for MGMT_OP_SET_LE: enable or disable LE support. LE-only
 * controllers cannot have LE switched off. When powered (and the host
 * LE state differs) a WRITE_LE_HOST_SUPPORTED request is issued with
 * le_enable_complete() as callback; otherwise only flags change.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Turning LE off removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before disabling LE on the host */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1998 
1999 /* This is a helper function to test for pending mgmt commands that can
2000  * cause CoD or EIR HCI commands. We can only allow one such pending
2001  * mgmt command at a time since otherwise we cannot easily track what
2002  * the current values are, will be, and based on that calculate if a new
2003  * HCI command needs to be sent and if yes with what value.
2004  */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	/* No EIR/CoD affecting command is currently pending */
	return false;
}
2021 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB), stored
 * little endian. Used by get_uuid_size() to detect shortened 16/32-bit
 * UUIDs: the first 12 bytes must match, the value lives in bytes 12-15.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2026 
2027 static u8 get_uuid_size(const u8 *uuid)
2028 {
2029 	u32 val;
2030 
2031 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2032 		return 128;
2033 
2034 	val = get_unaligned_le32(&uuid[12]);
2035 	if (val > 0xffff)
2036 		return 32;
2037 
2038 	return 16;
2039 }
2040 
/* Shared completion for commands that update EIR/class of device
 * (ADD_UUID, REMOVE_UUID, SET_DEV_CLASS): complete the pending
 * @mgmt_op with the current class of device as payload.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2059 
/* HCI request callback for add_uuid(): finish the pending ADD_UUID */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2066 
/* Handler for MGMT_OP_ADD_UUID: record the UUID and refresh class of
 * device and EIR. If no HCI commands are needed (-ENODATA) the command
 * is completed immediately, otherwise add_uuid_complete() finishes it.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/CoD changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA means no HCI commands were queued, so the
		 * command can be completed right away.
		 */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2124 
2125 static bool enable_service_cache(struct hci_dev *hdev)
2126 {
2127 	if (!hdev_is_powered(hdev))
2128 		return false;
2129 
2130 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2131 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2132 				   CACHE_TIMEOUT);
2133 		return true;
2134 	}
2135 
2136 	return false;
2137 }
2138 
/* HCI request completion callback for MGMT_OP_REMOVE_UUID: forwards the
 * HCI status to the shared class/EIR completion handler.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2145 
/* Handle MGMT_OP_REMOVE_UUID: delete one stored service UUID — or all
 * of them when the all-zero wildcard UUID is supplied — and refresh the
 * Class of Device and EIR data accordingly.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one class/EIR update may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got (re)armed, the class/EIR update
		 * is deferred to the cache flush, so reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* _safe variant needed because matching entries are freed */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: no HCI commands were needed, reply right away */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Reply is deferred until remove_uuid_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2224 
/* HCI request completion callback for MGMT_OP_SET_DEV_CLASS: forwards
 * the HCI status to the shared class/EIR completion handler.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2231 
/* Handle MGMT_OP_SET_DEV_CLASS: store the major/minor device class and,
 * if powered, push the resulting Class of Device to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Class of Device only exists on BR/EDR capable controllers */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one class/EIR update may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two minor bits and high three major bits must be zero */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Not powered: just store the values and reply; nothing needs to
	 * be sent to the controller now.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Lock is dropped around the synchronous cancel since the
		 * service cache work itself appears to need the hdev lock
		 * (NOTE(review): verify against service_cache_off).
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: no HCI commands were needed, reply right away */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Reply is deferred until set_class_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2302 
/* Handle MGMT_OP_LOAD_LINK_KEYS: replace the adapter's entire set of
 * stored BR/EDR link keys with the list supplied by userspace. All
 * entries are validated before the existing store is touched.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Cap so that struct_size() below stays within the u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Link keys are a BR/EDR concept */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the advertised count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* debug_keys is a boolean flag on the wire */
	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry up front so a bad one cannot leave the
	 * key store half-replaced.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Emit New Settings only if the keep-debug-keys flag flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Skip keys that have been administratively blocked */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2391 
2392 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2393 			   u8 addr_type, struct sock *skip_sk)
2394 {
2395 	struct mgmt_ev_device_unpaired ev;
2396 
2397 	bacpy(&ev.addr.bdaddr, bdaddr);
2398 	ev.addr.type = addr_type;
2399 
2400 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2401 			  skip_sk);
2402 }
2403 
/* Handle MGMT_OP_UNPAIR_DEVICE: remove all stored keys for a device
 * (link key for BR/EDR; LTK/IRK via SMP for LE) and, if requested and
 * connected, terminate the link. The reply is sent immediately unless a
 * disconnect was initiated, in which case it is deferred.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean flag on the wire */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Not connected: drop the connection parameters right away */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Defer the reply until the disconnect finishes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2531 
/* Handle MGMT_OP_DISCONNECT: force disconnection of an ACL or LE link
 * to the given remote address. The mgmt reply is deferred until the
 * disconnect completes, unless an error is hit first.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Reply is deferred until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2597 
2598 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2599 {
2600 	switch (link_type) {
2601 	case LE_LINK:
2602 		switch (addr_type) {
2603 		case ADDR_LE_DEV_PUBLIC:
2604 			return BDADDR_LE_PUBLIC;
2605 
2606 		default:
2607 			/* Fallback to LE Random address type */
2608 			return BDADDR_LE_RANDOM;
2609 		}
2610 
2611 	default:
2612 		/* Fallback to BR/EDR type */
2613 		return BDADDR_BREDR;
2614 	}
2615 }
2616 
/* Handle MGMT_OP_GET_CONNECTIONS: report the addresses of all
 * mgmt-visible connections. SCO/eSCO links are filtered out of the
 * reply since mgmt only deals with ACL/LE level connections.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the reply allocation */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the entries. SCO/eSCO links do not advance
	 * i, so a slot written for one is overwritten by the next entry or
	 * trimmed off by the recalculated length below.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2670 
/* Queue an HCI PIN Code Negative Reply on behalf of a mgmt command and
 * track it as a pending MGMT_OP_PIN_CODE_NEG_REPLY. Called with hdev
 * locked (see pin_code_reply()).
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command only takes the bdaddr, not the full mgmt addr */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2691 
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to
 * the controller for an ongoing BR/EDR pairing. The mgmt reply is
 * deferred until the HCI command completes.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* A PIN reply only makes sense for an existing ACL connection */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16 byte PIN; reject shorter ones by
	 * sending a negative reply to the controller instead.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2753 
2754 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2755 			     u16 len)
2756 {
2757 	struct mgmt_cp_set_io_capability *cp = data;
2758 
2759 	bt_dev_dbg(hdev, "sock %p", sk);
2760 
2761 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2762 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2763 				       MGMT_STATUS_INVALID_PARAMS);
2764 
2765 	hci_dev_lock(hdev);
2766 
2767 	hdev->io_capability = cp->io_capability;
2768 
2769 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2770 
2771 	hci_dev_unlock(hdev);
2772 
2773 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2774 				 NULL, 0);
2775 }
2776 
2777 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2778 {
2779 	struct hci_dev *hdev = conn->hdev;
2780 	struct mgmt_pending_cmd *cmd;
2781 
2782 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2783 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2784 			continue;
2785 
2786 		if (cmd->user_data != conn)
2787 			continue;
2788 
2789 		return cmd;
2790 	}
2791 
2792 	return NULL;
2793 }
2794 
/* Finalize a MGMT_OP_PAIR_DEVICE command: send the mgmt reply with the
 * given status, detach the pairing callbacks from the connection and
 * drop the references taken when pairing was started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Matches the hci_conn_get() done in pair_device() */
	hci_conn_put(conn);

	return err;
}
2823 
2824 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2825 {
2826 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2827 	struct mgmt_pending_cmd *cmd;
2828 
2829 	cmd = find_pairing(conn);
2830 	if (cmd) {
2831 		cmd->cmd_complete(cmd, status);
2832 		mgmt_pending_remove(cmd);
2833 	}
2834 }
2835 
/* Connection callback for BR/EDR pairings: completes the pending
 * MGMT_OP_PAIR_DEVICE with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2851 
/* Connection callback for LE pairings. Only failures are handled here:
 * a successful LE connection alone does not mean pairing finished (see
 * pair_device()); success is reported via mgmt_smp_complete() instead.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2870 
2871 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2872 		       u16 len)
2873 {
2874 	struct mgmt_cp_pair_device *cp = data;
2875 	struct mgmt_rp_pair_device rp;
2876 	struct mgmt_pending_cmd *cmd;
2877 	u8 sec_level, auth_type;
2878 	struct hci_conn *conn;
2879 	int err;
2880 
2881 	bt_dev_dbg(hdev, "sock %p", sk);
2882 
2883 	memset(&rp, 0, sizeof(rp));
2884 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2885 	rp.addr.type = cp->addr.type;
2886 
2887 	if (!bdaddr_type_is_valid(cp->addr.type))
2888 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2889 					 MGMT_STATUS_INVALID_PARAMS,
2890 					 &rp, sizeof(rp));
2891 
2892 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2893 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2894 					 MGMT_STATUS_INVALID_PARAMS,
2895 					 &rp, sizeof(rp));
2896 
2897 	hci_dev_lock(hdev);
2898 
2899 	if (!hdev_is_powered(hdev)) {
2900 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2901 					MGMT_STATUS_NOT_POWERED, &rp,
2902 					sizeof(rp));
2903 		goto unlock;
2904 	}
2905 
2906 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2907 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2908 					MGMT_STATUS_ALREADY_PAIRED, &rp,
2909 					sizeof(rp));
2910 		goto unlock;
2911 	}
2912 
2913 	sec_level = BT_SECURITY_MEDIUM;
2914 	auth_type = HCI_AT_DEDICATED_BONDING;
2915 
2916 	if (cp->addr.type == BDADDR_BREDR) {
2917 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2918 				       auth_type);
2919 	} else {
2920 		u8 addr_type = le_addr_type(cp->addr.type);
2921 		struct hci_conn_params *p;
2922 
2923 		/* When pairing a new device, it is expected to remember
2924 		 * this device for future connections. Adding the connection
2925 		 * parameter information ahead of time allows tracking
2926 		 * of the slave preferred values and will speed up any
2927 		 * further connection establishment.
2928 		 *
2929 		 * If connection parameters already exist, then they
2930 		 * will be kept and this function does nothing.
2931 		 */
2932 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2933 
2934 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2935 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
2936 
2937 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
2938 					   addr_type, sec_level,
2939 					   HCI_LE_CONN_TIMEOUT);
2940 	}
2941 
2942 	if (IS_ERR(conn)) {
2943 		int status;
2944 
2945 		if (PTR_ERR(conn) == -EBUSY)
2946 			status = MGMT_STATUS_BUSY;
2947 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
2948 			status = MGMT_STATUS_NOT_SUPPORTED;
2949 		else if (PTR_ERR(conn) == -ECONNREFUSED)
2950 			status = MGMT_STATUS_REJECTED;
2951 		else
2952 			status = MGMT_STATUS_CONNECT_FAILED;
2953 
2954 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2955 					status, &rp, sizeof(rp));
2956 		goto unlock;
2957 	}
2958 
2959 	if (conn->connect_cfm_cb) {
2960 		hci_conn_drop(conn);
2961 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2962 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2963 		goto unlock;
2964 	}
2965 
2966 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2967 	if (!cmd) {
2968 		err = -ENOMEM;
2969 		hci_conn_drop(conn);
2970 		goto unlock;
2971 	}
2972 
2973 	cmd->cmd_complete = pairing_complete;
2974 
2975 	/* For LE, just connecting isn't a proof that the pairing finished */
2976 	if (cp->addr.type == BDADDR_BREDR) {
2977 		conn->connect_cfm_cb = pairing_complete_cb;
2978 		conn->security_cfm_cb = pairing_complete_cb;
2979 		conn->disconn_cfm_cb = pairing_complete_cb;
2980 	} else {
2981 		conn->connect_cfm_cb = le_pairing_complete_cb;
2982 		conn->security_cfm_cb = le_pairing_complete_cb;
2983 		conn->disconn_cfm_cb = le_pairing_complete_cb;
2984 	}
2985 
2986 	conn->io_capability = cp->io_cap;
2987 	cmd->user_data = hci_conn_get(conn);
2988 
2989 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2990 	    hci_conn_security(conn, sec_level, auth_type, true)) {
2991 		cmd->cmd_complete(cmd, 0);
2992 		mgmt_pending_remove(cmd);
2993 	}
2994 
2995 	err = 0;
2996 
2997 unlock:
2998 	hci_dev_unlock(hdev);
2999 	return err;
3000 }
3001 
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the pending
 * MGMT_OP_PAIR_DEVICE whose target connection matches the given
 * address, completing it with a cancelled status.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The given address must match the device actually being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Complete the Pair Device command as cancelled and drop it */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3044 
/* Common backend for the user confirmation / passkey (negative) reply
 * mgmt commands: look up the connection for @addr and answer either
 * through SMP (LE pairings, immediate reply) or by sending @hci_op
 * (BR/EDR pairings, reply deferred until the HCI command completes).
 * @passkey is only used when @hci_op is HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing replies go through SMP and complete synchronously */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	/* BR/EDR: reply is deferred until the HCI command completes */
	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3115 
3116 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3117 			      void *data, u16 len)
3118 {
3119 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3120 
3121 	bt_dev_dbg(hdev, "sock %p", sk);
3122 
3123 	return user_pairing_resp(sk, hdev, &cp->addr,
3124 				MGMT_OP_PIN_CODE_NEG_REPLY,
3125 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3126 }
3127 
3128 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3129 			      u16 len)
3130 {
3131 	struct mgmt_cp_user_confirm_reply *cp = data;
3132 
3133 	bt_dev_dbg(hdev, "sock %p", sk);
3134 
3135 	if (len != sizeof(*cp))
3136 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3137 				       MGMT_STATUS_INVALID_PARAMS);
3138 
3139 	return user_pairing_resp(sk, hdev, &cp->addr,
3140 				 MGMT_OP_USER_CONFIRM_REPLY,
3141 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3142 }
3143 
3144 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3145 				  void *data, u16 len)
3146 {
3147 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3148 
3149 	bt_dev_dbg(hdev, "sock %p", sk);
3150 
3151 	return user_pairing_resp(sk, hdev, &cp->addr,
3152 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3153 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3154 }
3155 
3156 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3157 			      u16 len)
3158 {
3159 	struct mgmt_cp_user_passkey_reply *cp = data;
3160 
3161 	bt_dev_dbg(hdev, "sock %p", sk);
3162 
3163 	return user_pairing_resp(sk, hdev, &cp->addr,
3164 				 MGMT_OP_USER_PASSKEY_REPLY,
3165 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3166 }
3167 
3168 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3169 				  void *data, u16 len)
3170 {
3171 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3172 
3173 	bt_dev_dbg(hdev, "sock %p", sk);
3174 
3175 	return user_pairing_resp(sk, hdev, &cp->addr,
3176 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3177 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3178 }
3179 
/* If the currently active advertising instance carries data selected by
 * @flags (e.g. the local name), cancel its timeout and schedule the
 * next instance so the updated data gets re-advertised.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	/* Fire and forget: no completion handling needed here */
	hci_req_run(&req, NULL);
}
3208 
/* HCI request completion callback for MGMT_OP_SET_LOCAL_NAME: send the
 * pending mgmt reply and, on success, refresh any advertising instance
 * that carries the local name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The command may have been completed elsewhere already */
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* Re-advertise so the new name is picked up */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3240 
/* Handle MGMT_OP_SET_LOCAL_NAME: set the device name and short name.
 * If the controller is powered off only the stored values are updated
 * and the result is reported immediately; otherwise an HCI request is
 * queued and completion is reported asynchronously through
 * set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3310 
3311 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3312 			  u16 len)
3313 {
3314 	struct mgmt_cp_set_appearance *cp = data;
3315 	u16 appearance;
3316 	int err;
3317 
3318 	bt_dev_dbg(hdev, "sock %p", sk);
3319 
3320 	if (!lmp_le_capable(hdev))
3321 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3322 				       MGMT_STATUS_NOT_SUPPORTED);
3323 
3324 	appearance = le16_to_cpu(cp->appearance);
3325 
3326 	hci_dev_lock(hdev);
3327 
3328 	if (hdev->appearance != appearance) {
3329 		hdev->appearance = appearance;
3330 
3331 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3332 			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3333 
3334 		ext_info_changed(hdev, sk);
3335 	}
3336 
3337 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3338 				0);
3339 
3340 	hci_dev_unlock(hdev);
3341 
3342 	return err;
3343 }
3344 
/* Handle MGMT_OP_GET_PHY_CONFIGURATION: report the supported, selected
 * and configurable PHYs of the controller. All values are gathered
 * under the device lock and returned little-endian in the reply.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_confguration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3365 
/* Broadcast a PHY-configuration-changed event with the currently
 * selected PHYs to all mgmt sockets except @skip (typically the socket
 * that triggered the change, which already got a command reply).
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3377 
/* HCI request completion callback for MGMT_OP_SET_PHY_CONFIGURATION.
 * Reports the result to the pending command's socket and, on success,
 * broadcasts the new PHY configuration to the other mgmt sockets.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3408 
/* Handle MGMT_OP_SET_PHY_CONFIGURATION: apply a new PHY selection.
 * BR/EDR PHY bits are mapped onto the ACL packet-type mask stored in
 * hdev->pkt_type; LE PHY bits are translated into an
 * HCI_OP_LE_SET_DEFAULT_PHY command whose completion is reported via
 * set_default_phy_complete(). Selections outside the supported set, or
 * that try to disable non-configurable PHYs, are rejected.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_confguration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* PHYs that are supported but not configurable must always stay
	 * selected; rejecting any attempt to clear them.
	 */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map BR/EDR PHY selections to the packet-type mask. Note the
	 * inverted sense for EDR bits: in the HCI packet-type mask the
	 * 2M/3M (2DHx/3DHx) bits mean "do NOT use this packet type".
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only BR/EDR PHYs changed there is no HCI command to send;
	 * report completion (and broadcast the change) right away.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller it has no TX/RX preference */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3563 
3564 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3565 			    u16 len)
3566 {
3567 	int err = MGMT_STATUS_SUCCESS;
3568 	struct mgmt_cp_set_blocked_keys *keys = data;
3569 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3570 				   sizeof(struct mgmt_blocked_key_info));
3571 	u16 key_count, expected_len;
3572 	int i;
3573 
3574 	bt_dev_dbg(hdev, "sock %p", sk);
3575 
3576 	key_count = __le16_to_cpu(keys->key_count);
3577 	if (key_count > max_key_count) {
3578 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3579 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3580 				       MGMT_STATUS_INVALID_PARAMS);
3581 	}
3582 
3583 	expected_len = struct_size(keys, keys, key_count);
3584 	if (expected_len != len) {
3585 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3586 			   expected_len, len);
3587 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3588 				       MGMT_STATUS_INVALID_PARAMS);
3589 	}
3590 
3591 	hci_dev_lock(hdev);
3592 
3593 	hci_blocked_keys_clear(hdev);
3594 
3595 	for (i = 0; i < keys->key_count; ++i) {
3596 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3597 
3598 		if (!b) {
3599 			err = MGMT_STATUS_NO_RESOURCES;
3600 			break;
3601 		}
3602 
3603 		b->type = keys->keys[i].type;
3604 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3605 		list_add_rcu(&b->list, &hdev->blocked_keys);
3606 	}
3607 	hci_dev_unlock(hdev);
3608 
3609 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3610 				err, NULL, 0);
3611 }
3612 
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband-speech
 * setting. Only available when the driver declares the quirk; the
 * setting can only be changed while the controller is powered off
 * (changing it while powered is rejected).
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* While powered, only a request that matches the current state
	 * is accepted (i.e. no actual change is possible).
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3668 
/* Handle MGMT_OP_READ_SECURITY_INFO: report security capabilities as
 * a sequence of EIR-style TLV entries (tag 0x01: flags, tag 0x02: max
 * BR/EDR encryption key size, tag 0x03: max SMP encryption key size).
 * The 16-byte buffer is sized for the worst case: 2 bytes of header
 * plus 3 + 4 + 4 bytes of TLV data.
 */
static int read_security_info(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	char buf[16];
	struct mgmt_rp_read_security_info *rp = (void *)buf;
	u16 sec_len = 0;
	u8 flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 */
	if (hdev->commands[41] & 0x08)
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
					  hdev->max_enc_key_size);

	sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);

	rp->sec_len = cpu_to_le16(sec_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
				 rp, sizeof(*rp) + sec_len);
}
3717 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
/* The bytes below are the UUID above in reversed (little-endian wire)
 * order, matching how UUIDs are carried over the mgmt interface.
 */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif
3725 
/* Handle MGMT_OP_READ_EXP_FEATURES_INFO: list available experimental
 * features. Currently only the debug feature (on the non-controller
 * index, hdev == NULL) is reported when CONFIG_BT_FEATURE_DEBUG is
 * set. Each feature entry is 16 bytes of UUID plus 4 bytes of flags
 * (hence the "20 * idx" reply sizing below).
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[42];
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		u32 flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3758 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Notify sockets that opted in to experimental-feature events (except
 * @skip) that the debug feature was toggled to @enabled.
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, debug_uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#endif
3773 
/* Handle MGMT_OP_SET_EXP_FEATURE: enable/disable an experimental
 * feature identified by UUID. The all-zero UUID disables everything;
 * the debug UUID (CONFIG_BT_FEATURE_DEBUG only) toggles dynamic debug
 * output and must be sent on the non-controller index. Unknown UUIDs
 * yield MGMT_STATUS_NOT_SUPPORTED.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	struct mgmt_rp_set_exp_feature rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
		memset(rp.uuid, 0, 16);
		rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
		if (!hdev) {
			bool changed = bt_dbg_get();

			bt_dbg_set(false);

			if (changed)
				exp_debug_feature_changed(false, sk);
		}
#endif

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
					 MGMT_OP_SET_EXP_FEATURE, 0,
					 &rp, sizeof(rp));
	}

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!memcmp(cp->uuid, debug_uuid, 16)) {
		bool val, changed;
		int err;

		/* Command requires to use the non-controller index */
		if (hdev)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];
		changed = val ? !bt_dbg_get() : bt_dbg_get();
		bt_dbg_set(val);

		memcpy(rp.uuid, debug_uuid, 16);
		rp.flags = cpu_to_le32(val ? BIT(0) : 0);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_debug_feature_changed(val, sk);

		return err;
	}
#endif

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
3851 
/* HCI completion callback for MGMT_OP_READ_LOCAL_OOB_DATA. Translates
 * the controller's OOB reply into the mgmt reply format. For the
 * legacy (non-extended) command only the P-192 hash/rand are returned,
 * so the reply is shrunk by the size of the P-256 fields.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a truncated controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3910 
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: request local OOB pairing data
 * from the controller. Requires a powered, SSP-capable controller.
 * Uses the extended HCI command when BR/EDR Secure Connections is
 * enabled; the result is delivered via read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3961 
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store remote OOB pairing data.
 * Two payload sizes are accepted: the legacy form (P-192 only, BR/EDR
 * addresses only) and the extended form (P-192 plus P-256). In the
 * extended form, all-zero hash/rand pairs disable OOB data for that
 * key strength; LE addresses additionally require the P-192 values to
 * be zero since legacy SMP OOB isn't implemented.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4069 
4070 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4071 				  void *data, u16 len)
4072 {
4073 	struct mgmt_cp_remove_remote_oob_data *cp = data;
4074 	u8 status;
4075 	int err;
4076 
4077 	bt_dev_dbg(hdev, "sock %p", sk);
4078 
4079 	if (cp->addr.type != BDADDR_BREDR)
4080 		return mgmt_cmd_complete(sk, hdev->id,
4081 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4082 					 MGMT_STATUS_INVALID_PARAMS,
4083 					 &cp->addr, sizeof(cp->addr));
4084 
4085 	hci_dev_lock(hdev);
4086 
4087 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4088 		hci_remote_oob_data_clear(hdev);
4089 		status = MGMT_STATUS_SUCCESS;
4090 		goto done;
4091 	}
4092 
4093 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4094 	if (err < 0)
4095 		status = MGMT_STATUS_INVALID_PARAMS;
4096 	else
4097 		status = MGMT_STATUS_SUCCESS;
4098 
4099 done:
4100 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4101 				status, &cp->addr, sizeof(cp->addr));
4102 
4103 	hci_dev_unlock(hdev);
4104 	return err;
4105 }
4106 
/* Completion hook for the start-discovery HCI work. Finds whichever
 * discovery-start variant is pending (regular, service or limited),
 * completes it with the translated status, and wakes the suspend path
 * if it was waiting for discovery to be unpaused.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4136 
4137 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4138 				    uint8_t *mgmt_status)
4139 {
4140 	switch (type) {
4141 	case DISCOV_TYPE_LE:
4142 		*mgmt_status = mgmt_le_support(hdev);
4143 		if (*mgmt_status)
4144 			return false;
4145 		break;
4146 	case DISCOV_TYPE_INTERLEAVED:
4147 		*mgmt_status = mgmt_le_support(hdev);
4148 		if (*mgmt_status)
4149 			return false;
4150 		/* Intentional fall-through */
4151 	case DISCOV_TYPE_BREDR:
4152 		*mgmt_status = mgmt_bredr_support(hdev);
4153 		if (*mgmt_status)
4154 			return false;
4155 		break;
4156 	default:
4157 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4158 		return false;
4159 	}
4160 
4161 	return true;
4162 }
4163 
/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects the variant). Validates
 * state (powered, not already discovering, not paused, valid type),
 * records the discovery parameters, and kicks off the discovery work;
 * the final result is reported via mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4231 
/* Handle MGMT_OP_START_DISCOVERY (regular discovery). */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
4238 
/* Handle MGMT_OP_START_LIMITED_DISCOVERY (limited discovery mode). */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
4246 
4247 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4248 					  u8 status)
4249 {
4250 	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
4251 				 cmd->param, 1);
4252 }
4253 
/* Handle MGMT_OP_START_SERVICE_DISCOVERY: start discovery with result
 * filtering by RSSI threshold and an optional UUID list. The UUID list
 * length is validated against the command length before being copied
 * into hdev->discovery; the actual scan is run asynchronously by the
 * discovery work item.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Bound uuid_count so expected_len below cannot overflow u16 */
	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4354 
/* Called when the Stop Discovery HCI transaction has finished. Completes
 * the pending MGMT Stop Discovery command, if any, and wakes up the
 * suspend machinery when discovery was paused on its behalf.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4377 
4378 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4379 			  u16 len)
4380 {
4381 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
4382 	struct mgmt_pending_cmd *cmd;
4383 	int err;
4384 
4385 	bt_dev_dbg(hdev, "sock %p", sk);
4386 
4387 	hci_dev_lock(hdev);
4388 
4389 	if (!hci_discovery_active(hdev)) {
4390 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4391 					MGMT_STATUS_REJECTED, &mgmt_cp->type,
4392 					sizeof(mgmt_cp->type));
4393 		goto unlock;
4394 	}
4395 
4396 	if (hdev->discovery.type != mgmt_cp->type) {
4397 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4398 					MGMT_STATUS_INVALID_PARAMS,
4399 					&mgmt_cp->type, sizeof(mgmt_cp->type));
4400 		goto unlock;
4401 	}
4402 
4403 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4404 	if (!cmd) {
4405 		err = -ENOMEM;
4406 		goto unlock;
4407 	}
4408 
4409 	cmd->cmd_complete = generic_cmd_complete;
4410 
4411 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4412 	queue_work(hdev->req_workqueue, &hdev->discov_update);
4413 	err = 0;
4414 
4415 unlock:
4416 	hci_dev_unlock(hdev);
4417 	return err;
4418 }
4419 
4420 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4421 			u16 len)
4422 {
4423 	struct mgmt_cp_confirm_name *cp = data;
4424 	struct inquiry_entry *e;
4425 	int err;
4426 
4427 	bt_dev_dbg(hdev, "sock %p", sk);
4428 
4429 	hci_dev_lock(hdev);
4430 
4431 	if (!hci_discovery_active(hdev)) {
4432 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4433 					MGMT_STATUS_FAILED, &cp->addr,
4434 					sizeof(cp->addr));
4435 		goto failed;
4436 	}
4437 
4438 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4439 	if (!e) {
4440 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4441 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4442 					sizeof(cp->addr));
4443 		goto failed;
4444 	}
4445 
4446 	if (cp->name_known) {
4447 		e->name_state = NAME_KNOWN;
4448 		list_del(&e->list);
4449 	} else {
4450 		e->name_state = NAME_NEEDED;
4451 		hci_inquiry_cache_update_resolve(hdev, e);
4452 	}
4453 
4454 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4455 				&cp->addr, sizeof(cp->addr));
4456 
4457 failed:
4458 	hci_dev_unlock(hdev);
4459 	return err;
4460 }
4461 
4462 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4463 			u16 len)
4464 {
4465 	struct mgmt_cp_block_device *cp = data;
4466 	u8 status;
4467 	int err;
4468 
4469 	bt_dev_dbg(hdev, "sock %p", sk);
4470 
4471 	if (!bdaddr_type_is_valid(cp->addr.type))
4472 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4473 					 MGMT_STATUS_INVALID_PARAMS,
4474 					 &cp->addr, sizeof(cp->addr));
4475 
4476 	hci_dev_lock(hdev);
4477 
4478 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4479 				  cp->addr.type);
4480 	if (err < 0) {
4481 		status = MGMT_STATUS_FAILED;
4482 		goto done;
4483 	}
4484 
4485 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4486 		   sk);
4487 	status = MGMT_STATUS_SUCCESS;
4488 
4489 done:
4490 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4491 				&cp->addr, sizeof(cp->addr));
4492 
4493 	hci_dev_unlock(hdev);
4494 
4495 	return err;
4496 }
4497 
4498 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4499 			  u16 len)
4500 {
4501 	struct mgmt_cp_unblock_device *cp = data;
4502 	u8 status;
4503 	int err;
4504 
4505 	bt_dev_dbg(hdev, "sock %p", sk);
4506 
4507 	if (!bdaddr_type_is_valid(cp->addr.type))
4508 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4509 					 MGMT_STATUS_INVALID_PARAMS,
4510 					 &cp->addr, sizeof(cp->addr));
4511 
4512 	hci_dev_lock(hdev);
4513 
4514 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4515 				  cp->addr.type);
4516 	if (err < 0) {
4517 		status = MGMT_STATUS_INVALID_PARAMS;
4518 		goto done;
4519 	}
4520 
4521 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4522 		   sk);
4523 	status = MGMT_STATUS_SUCCESS;
4524 
4525 done:
4526 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4527 				&cp->addr, sizeof(cp->addr));
4528 
4529 	hci_dev_unlock(hdev);
4530 
4531 	return err;
4532 }
4533 
4534 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4535 			 u16 len)
4536 {
4537 	struct mgmt_cp_set_device_id *cp = data;
4538 	struct hci_request req;
4539 	int err;
4540 	__u16 source;
4541 
4542 	bt_dev_dbg(hdev, "sock %p", sk);
4543 
4544 	source = __le16_to_cpu(cp->source);
4545 
4546 	if (source > 0x0002)
4547 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4548 				       MGMT_STATUS_INVALID_PARAMS);
4549 
4550 	hci_dev_lock(hdev);
4551 
4552 	hdev->devid_source = source;
4553 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4554 	hdev->devid_product = __le16_to_cpu(cp->product);
4555 	hdev->devid_version = __le16_to_cpu(cp->version);
4556 
4557 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4558 				NULL, 0);
4559 
4560 	hci_req_init(&req, hdev);
4561 	__hci_req_update_eir(&req);
4562 	hci_req_run(&req, NULL);
4563 
4564 	hci_dev_unlock(hdev);
4565 
4566 	return err;
4567 }
4568 
/* Completion callback used when re-enabling instance advertising after
 * the Set Advertising setting was turned off; only logs the status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %d", status);
}
4574 
/* Completion handler for the Set Advertising HCI transaction. Syncs the
 * user-visible HCI_ADVERTISING setting with the controller state,
 * responds to all pending Set Advertising commands and, when the setting
 * was just disabled while advertising instances exist, reschedules
 * multi-instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command with the
		 * translated HCI error.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's LE advertising state into the
	 * HCI_ADVERTISING setting flag.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* If no instance is currently selected, fall back to the first
	 * configured one.
	 */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
4648 
/* MGMT Set Advertising handler. val 0x00 disables, 0x01 enables and 0x02
 * enables connectable advertising. When no HCI traffic is needed (device
 * powered off, setting already matches, LE connections exist, or an
 * active LE scan is running) only the setting flags are toggled;
 * otherwise an HCI request is issued and completed asynchronously via
 * set_advertising_complete.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* While advertising is paused for suspend, changes are rejected. */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast New Settings only when a flag actually flipped. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one advertising/LE transaction may be in flight at a time. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4760 
4761 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4762 			      void *data, u16 len)
4763 {
4764 	struct mgmt_cp_set_static_address *cp = data;
4765 	int err;
4766 
4767 	bt_dev_dbg(hdev, "sock %p", sk);
4768 
4769 	if (!lmp_le_capable(hdev))
4770 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4771 				       MGMT_STATUS_NOT_SUPPORTED);
4772 
4773 	if (hdev_is_powered(hdev))
4774 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4775 				       MGMT_STATUS_REJECTED);
4776 
4777 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4778 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4779 			return mgmt_cmd_status(sk, hdev->id,
4780 					       MGMT_OP_SET_STATIC_ADDRESS,
4781 					       MGMT_STATUS_INVALID_PARAMS);
4782 
4783 		/* Two most significant bits shall be set */
4784 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4785 			return mgmt_cmd_status(sk, hdev->id,
4786 					       MGMT_OP_SET_STATIC_ADDRESS,
4787 					       MGMT_STATUS_INVALID_PARAMS);
4788 	}
4789 
4790 	hci_dev_lock(hdev);
4791 
4792 	bacpy(&hdev->static_addr, &cp->bdaddr);
4793 
4794 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4795 	if (err < 0)
4796 		goto unlock;
4797 
4798 	err = new_settings(hdev, sk);
4799 
4800 unlock:
4801 	hci_dev_unlock(hdev);
4802 	return err;
4803 }
4804 
/* MGMT Set Scan Parameters handler. Stores the LE scan interval/window
 * (0x0004-0x4000 slots, window <= interval) and restarts the passive
 * background scan so the new parameters take effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window must not exceed the scan interval. */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
4861 
/* Completion handler for the Set Fast Connectable HCI transaction.
 * Updates the HCI_FAST_CONNECTABLE flag according to the requested value
 * and completes the pending command.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		/* The requested mode is stored in the pending command's
		 * parameter copy.
		 */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4895 
/* MGMT Set Fast Connectable handler. Requires BR/EDR (1.2 or later).
 * When powered, issues the page scan parameter change over HCI and
 * completes via fast_connectable_complete; when powered off, only the
 * setting flag is toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to do if the setting already matches. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* While powered off only the flag is toggled; the page scan
	 * parameters are written when the controller powers on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4960 
/* Completion handler for the Set BR/EDR HCI transaction. On failure the
 * optimistically-set HCI_BREDR_ENABLED flag is rolled back; on success
 * the pending command is completed with the new settings.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4992 
/* MGMT Set BR/EDR handler for dual-mode controllers. Disabling BR/EDR is
 * only permitted while powered off; re-enabling it is rejected when the
 * controller was configured LE-only with a static address or with secure
 * connections enabled, since that would create an invalid identity
 * configuration.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Nothing to do if the setting already matches. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* While powered off only the flags are toggled; disabling BR/EDR
	 * also clears every setting that depends on it.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5104 
/* Completion handler for the Write Secure Connections Support HCI
 * command. Maps the requested value onto the HCI_SC_ENABLED and
 * HCI_SC_ONLY flags (0x00 = off, 0x01 = enabled, 0x02 = SC-only mode)
 * and completes the pending command.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	/* The requested mode was stashed in the pending command. */
	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5149 
/* MGMT Set Secure Connections handler. val 0x00 disables, 0x01 enables
 * and 0x02 enables SC-only mode. When the controller is powered off, not
 * SC-capable, or BR/EDR is disabled, only the host-side flags are
 * toggled; otherwise the support is written over HCI and completed via
 * sc_enable_complete.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On a BR/EDR SC-capable controller, SSP must be enabled first. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No HCI traffic needed: just toggle the host-side flags and
	 * answer directly.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do if both flags already match the request. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5237 
/* MGMT Set Debug Keys handler. val 0x00 discards debug keys, 0x01 keeps
 * them, and 0x02 additionally makes the controller generate debug keys
 * (SSP debug mode). Turning debug-key use on or off while powered with
 * SSP enabled is pushed to the controller immediately.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* 0x01 and 0x02 both mean debug keys are retained. */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 turns on generation/use of debug keys. */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5284 
/* MGMT Set Privacy handler. privacy 0x00 disables, 0x01 enables full
 * privacy (resolvable private addresses) and 0x02 enables limited
 * privacy. Stores the supplied Identity Resolving Key; only allowed on
 * LE-capable, powered-off controllers.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The privacy setting may only be changed while powered off. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA from the new IRK. */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5341 
5342 static bool irk_is_valid(struct mgmt_irk_info *irk)
5343 {
5344 	switch (irk->addr.type) {
5345 	case BDADDR_LE_PUBLIC:
5346 		return true;
5347 
5348 	case BDADDR_LE_RANDOM:
5349 		/* Two most significant bits shall be set */
5350 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5351 			return false;
5352 		return true;
5353 	}
5354 
5355 	return false;
5356 }
5357 
/* MGMT Load IRKs handler. Replaces the SMP IRK cache with the supplied
 * list after validating the command length and every entry; keys matching
 * the blocked-key list are skipped with a warning.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on the IRK count such that the total command length
	 * still fits in a u16.
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the advertised count. */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries up front so the cache is only cleared when
	 * the whole list is acceptable.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space handles IRKs, so RPA resolving is expected too. */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5428 
5429 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5430 {
5431 	if (key->master != 0x00 && key->master != 0x01)
5432 		return false;
5433 
5434 	switch (key->addr.type) {
5435 	case BDADDR_LE_PUBLIC:
5436 		return true;
5437 
5438 	case BDADDR_LE_RANDOM:
5439 		/* Two most significant bits shall be set */
5440 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5441 			return false;
5442 		return true;
5443 	}
5444 
5445 	return false;
5446 }
5447 
/* Handle the MGMT Load Long Term Keys command: replace all stored SMP
 * LTKs with the list supplied by userspace. Blocked and debug keys are
 * silently skipped rather than rejected.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Largest key_count for which the whole payload still fits within
	 * the 16-bit mgmt length field.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared key count must match the actual payload length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry up front so the command is all-or-nothing
	 * with respect to malformed input.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Full replacement: drop all previously stored LTKs first */
	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked key values are skipped, not
		 * treated as an error.
		 */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type onto the internal SMP key type and
		 * authentication level. Note that the P256 debug case falls
		 * through into default's "continue": debug keys (and any
		 * unknown type) are deliberately not stored.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* fall through */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5543 
/* Complete a pending Get Conn Info command: on success fill the reply
 * from the values cached in the hci_conn, otherwise report the
 * "invalid" marker values. Also releases the connection references
 * taken when the command was queued in get_conn_info().
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* cmd->param is the original request, which starts with the same
	 * address block the reply echoes back.
	 */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Balance the hci_conn_hold()/hci_conn_get() from get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
5570 
/* HCI request completion callback for the RSSI/TX-power refresh issued
 * by get_conn_info(). Resolves the connection from the last sent HCI
 * command and finishes the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command matches the last sent one - nothing to complete */
	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* Find the pending Get Conn Info command keyed on this connection */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5624 
/* Handle the MGMT Get Connection Information command. Replies from the
 * cached RSSI/TX power values when they are fresh enough, otherwise
 * queues an HCI request to refresh them and defers the reply to
 * conn_info_refresh_complete()/conn_info_cmd_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the reply address so error responses can echo it too */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look up the connection on the link type implied by the address */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one Get Conn Info may be in flight per connection */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		/* Read RSSI is always first; conn_info_refresh_complete()
		 * relies on that ordering.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References released in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5745 
/* Complete a pending Get Clock Information command. Fills in the local
 * clock and, when a connection was involved, the piconet clock and
 * accuracy; on error only the echoed address is reported. Releases the
 * connection references taken in get_clock_info() if one was held.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* cmd->param is the original request, starting with the address */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	/* conn is only set when a piconet clock was requested */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Balance the hci_conn_hold()/hci_conn_get() from get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
5781 
/* HCI request completion callback for the Read Clock request issued by
 * get_clock_info(). Resolves the connection (if the piconet clock was
 * read) and finishes the matching pending mgmt command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the last sent command read the piconet clock
	 * of a specific connection handle; 0 means the local clock.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5813 
/* Handle the MGMT Get Clock Information command. Always reads the
 * local clock; when a peer BR/EDR address is given, additionally reads
 * that connection's piconet clock. The reply is sent asynchronously by
 * get_clock_info_complete()/clock_info_cmd_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the reply address so error responses can echo it too */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects a specific connection's piconet clock */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First command: read the local clock (which = 0 after memset) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References released in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5889 
5890 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5891 {
5892 	struct hci_conn *conn;
5893 
5894 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5895 	if (!conn)
5896 		return false;
5897 
5898 	if (conn->dst_type != type)
5899 		return false;
5900 
5901 	if (conn->state != BT_CONNECTED)
5902 		return false;
5903 
5904 	return true;
5905 }
5906 
/* This function requires the caller holds hdev->lock */
/* Create (or look up) connection parameters for the given address and
 * set their auto-connect policy, moving the entry onto the matching
 * pending-connection or pending-report list. Returns 0 on success or
 * -EIO when the parameters could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is currently on
	 * before re-queueing it according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if one isn't already up */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
5951 
/* Emit the MGMT Device Added event to all sockets except @sk */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;
	ev.action = action;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
5963 
/* Handle the MGMT Add Device command. For BR/EDR addresses the device
 * is added to the whitelist (only action 0x01, incoming connections,
 * is supported); for LE addresses connection parameters are created
 * with the auto-connect policy mapped from cp->action.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The address must be valid and must not be the "any" address */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6051 
/* Emit the MGMT Device Removed event to all sockets except @sk */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
6062 
/* Handle the MGMT Remove Device command. With a specific address it
 * removes a single whitelist entry (BR/EDR) or connection-parameter
 * entry (LE); with BDADDR_ANY (and type 0) it clears the whitelist and
 * all non-disabled LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Remove a single device */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries that were never added via Add Device (disabled or
		 * explicit-connect only) cannot be removed through mgmt.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove all devices */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* With the wildcard address the type must be 0 as well */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with an ongoing explicit connect,
			 * just downgrade them to explicit-only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6191 
/* Handle the MGMT Load Connection Parameters command: replace all
 * disabled connection-parameter entries with the supplied list. Invalid
 * individual entries are logged and skipped rather than failing the
 * whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest param_count for which the whole payload still fits
	 * within the 16-bit mgmt length field.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared entry count must match the actual payload length */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE address types are meaningful here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6276 
6277 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6278 			       void *data, u16 len)
6279 {
6280 	struct mgmt_cp_set_external_config *cp = data;
6281 	bool changed;
6282 	int err;
6283 
6284 	bt_dev_dbg(hdev, "sock %p", sk);
6285 
6286 	if (hdev_is_powered(hdev))
6287 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6288 				       MGMT_STATUS_REJECTED);
6289 
6290 	if (cp->config != 0x00 && cp->config != 0x01)
6291 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6292 				         MGMT_STATUS_INVALID_PARAMS);
6293 
6294 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6295 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6296 				       MGMT_STATUS_NOT_SUPPORTED);
6297 
6298 	hci_dev_lock(hdev);
6299 
6300 	if (cp->config)
6301 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6302 	else
6303 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6304 
6305 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6306 	if (err < 0)
6307 		goto unlock;
6308 
6309 	if (!changed)
6310 		goto unlock;
6311 
6312 	err = new_options(hdev, sk);
6313 
6314 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6315 		mgmt_index_removed(hdev);
6316 
6317 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6318 			hci_dev_set_flag(hdev, HCI_CONFIG);
6319 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6320 
6321 			queue_work(hdev->req_workqueue, &hdev->power_on);
6322 		} else {
6323 			set_bit(HCI_RAW, &hdev->flags);
6324 			mgmt_index_added(hdev);
6325 		}
6326 	}
6327 
6328 unlock:
6329 	hci_dev_unlock(hdev);
6330 	return err;
6331 }
6332 
/* Handle the MGMT Set Public Address command: record the address to be
 * programmed via hdev->set_bdaddr and, if the controller thereby
 * becomes configured, re-register it and trigger auto power-on. Only
 * permitted while powered off and when the driver supports setting the
 * address.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* The "any" address is not a valid public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Setting the address may have completed the configuration;
	 * re-register the index and kick off auto power-on if so.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6384 
/* HCI request completion callback for Read Local OOB (Extended) Data.
 * Builds the EIR-formatted OOB reply (P-192 and/or P-256 hash and
 * randomizer), completes the pending mgmt command and broadcasts the
 * data to sockets that enabled OOB data events.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	/* Pick the hash/randomizer pointers depending on which HCI command
	 * was run and whether it succeeded. Note: on the failure paths
	 * below that leave the pointers unset, status is non-zero, and the
	 * "if (status) goto send_rsp" after allocation guarantees they are
	 * never read.
	 */
	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy command: P-192 data only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* 5 bytes class-of-dev EIR + two 18-byte EIR fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended command: P-256 data, plus P-192 unless SC-only */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the new OOB data, excluding the requester's socket */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
6495 
6496 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6497 				  struct mgmt_cp_read_local_oob_ext_data *cp)
6498 {
6499 	struct mgmt_pending_cmd *cmd;
6500 	struct hci_request req;
6501 	int err;
6502 
6503 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6504 			       cp, sizeof(*cp));
6505 	if (!cmd)
6506 		return -ENOMEM;
6507 
6508 	hci_req_init(&req, hdev);
6509 
6510 	if (bredr_sc_enabled(hdev))
6511 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6512 	else
6513 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6514 
6515 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6516 	if (err < 0) {
6517 		mgmt_pending_remove(cmd);
6518 		return err;
6519 	}
6520 
6521 	return 0;
6522 }
6523 
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA. For BR/EDR with SSP enabled
 * the reply is produced asynchronously via read_local_ssp_oob_req(); in all
 * other supported cases the EIR payload is built synchronously here. On
 * success the requesting socket is also subscribed to OOB data update
 * events.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: validate the requested address type(s) and predict
	 * the maximum EIR length so the reply buffer can be sized before
	 * any data is generated.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* Address (9) + role (3) + confirm (18) +
				 * random (18) + flags (3); must cover the
				 * worst case appended below.
				 */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually build the EIR payload, tracking the real
	 * length as fields are appended.
	 */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP OOB data must come from the controller; the
			 * reply is sent from the request's completion
			 * callback instead of here.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		/* hash/rand are only generated - and later only appended -
		 * when Secure Connections is enabled.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the remote side will see: the static
		 * random address (type 0x01) when forced, when no public
		 * address exists, or when BR/EDR is disabled and a static
		 * address is configured; the public address (type 0x00)
		 * otherwise.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role preference: 0x02 when advertising (peripheral
		 * preferred), 0x01 otherwise.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* The requester implicitly subscribes to future OOB data updates. */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
6679 
6680 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6681 {
6682 	u32 flags = 0;
6683 
6684 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
6685 	flags |= MGMT_ADV_FLAG_DISCOV;
6686 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6687 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6688 	flags |= MGMT_ADV_FLAG_APPEARANCE;
6689 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
6690 
6691 	/* In extended adv TX_POWER returned from Set Adv Param
6692 	 * will be always valid.
6693 	 */
6694 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
6695 	    ext_adv_capable(hdev))
6696 		flags |= MGMT_ADV_FLAG_TX_POWER;
6697 
6698 	if (ext_adv_capable(hdev)) {
6699 		flags |= MGMT_ADV_FLAG_SEC_1M;
6700 
6701 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
6702 			flags |= MGMT_ADV_FLAG_SEC_2M;
6703 
6704 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
6705 			flags |= MGMT_ADV_FLAG_SEC_CODED;
6706 	}
6707 
6708 	return flags;
6709 }
6710 
6711 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6712 			     void *data, u16 data_len)
6713 {
6714 	struct mgmt_rp_read_adv_features *rp;
6715 	size_t rp_len;
6716 	int err;
6717 	struct adv_info *adv_instance;
6718 	u32 supported_flags;
6719 	u8 *instance;
6720 
6721 	bt_dev_dbg(hdev, "sock %p", sk);
6722 
6723 	if (!lmp_le_capable(hdev))
6724 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6725 				       MGMT_STATUS_REJECTED);
6726 
6727 	hci_dev_lock(hdev);
6728 
6729 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
6730 	rp = kmalloc(rp_len, GFP_ATOMIC);
6731 	if (!rp) {
6732 		hci_dev_unlock(hdev);
6733 		return -ENOMEM;
6734 	}
6735 
6736 	supported_flags = get_supported_adv_flags(hdev);
6737 
6738 	rp->supported_flags = cpu_to_le32(supported_flags);
6739 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6740 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6741 	rp->max_instances = HCI_MAX_ADV_INSTANCES;
6742 	rp->num_instances = hdev->adv_instance_cnt;
6743 
6744 	instance = rp->instance;
6745 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
6746 		*instance = adv_instance->instance;
6747 		instance++;
6748 	}
6749 
6750 	hci_dev_unlock(hdev);
6751 
6752 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6753 				MGMT_STATUS_SUCCESS, rp, rp_len);
6754 
6755 	kfree(rp);
6756 
6757 	return err;
6758 }
6759 
6760 static u8 calculate_name_len(struct hci_dev *hdev)
6761 {
6762 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
6763 
6764 	return append_local_name(hdev, buf, 0);
6765 }
6766 
6767 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
6768 			   bool is_adv_data)
6769 {
6770 	u8 max_len = HCI_MAX_AD_LENGTH;
6771 
6772 	if (is_adv_data) {
6773 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6774 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
6775 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
6776 			max_len -= 3;
6777 
6778 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6779 			max_len -= 3;
6780 	} else {
6781 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
6782 			max_len -= calculate_name_len(hdev);
6783 
6784 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
6785 			max_len -= 4;
6786 	}
6787 
6788 	return max_len;
6789 }
6790 
6791 static bool flags_managed(u32 adv_flags)
6792 {
6793 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
6794 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
6795 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
6796 }
6797 
6798 static bool tx_power_managed(u32 adv_flags)
6799 {
6800 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
6801 }
6802 
6803 static bool name_managed(u32 adv_flags)
6804 {
6805 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
6806 }
6807 
6808 static bool appearance_managed(u32 adv_flags)
6809 {
6810 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
6811 }
6812 
6813 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6814 			      u8 len, bool is_adv_data)
6815 {
6816 	int i, cur_len;
6817 	u8 max_len;
6818 
6819 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
6820 
6821 	if (len > max_len)
6822 		return false;
6823 
6824 	/* Make sure that the data is correctly formatted. */
6825 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6826 		cur_len = data[i];
6827 
6828 		if (data[i + 1] == EIR_FLAGS &&
6829 		    (!is_adv_data || flags_managed(adv_flags)))
6830 			return false;
6831 
6832 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
6833 			return false;
6834 
6835 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
6836 			return false;
6837 
6838 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
6839 			return false;
6840 
6841 		if (data[i + 1] == EIR_APPEARANCE &&
6842 		    appearance_managed(adv_flags))
6843 			return false;
6844 
6845 		/* If the current field length would exceed the total data
6846 		 * length, then it's invalid.
6847 		 */
6848 		if (i + cur_len >= len)
6849 			return false;
6850 	}
6851 
6852 	return true;
6853 }
6854 
/* Completion callback for the HCI request scheduled by add_advertising().
 * On failure every still-pending instance is removed again (with an
 * Advertising Removed event); on success pending instances are marked as
 * committed. Finally the pending MGMT_OP_ADD_ADVERTISING command, if any,
 * is answered.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	/* Safe iteration: failed instances are removed from the list
	 * inside the loop.
	 */
	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the rotation timer before removing the instance it
		 * currently points at.
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6906 
/* Handler for MGMT_OP_ADD_ADVERTISING: validate and register an
 * advertising instance and, when possible, schedule it on the controller.
 * The reply is sent immediately when no HCI traffic is needed, otherwise
 * from add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags, phy_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length part must be exactly adv data followed by
	 * scan response data.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
	/* phy_flags & -phy_flags isolates the lowest set bit; the XOR is
	 * non-zero iff more than one secondary PHY bit was requested.
	 */
	if (flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Refuse while another operation is reconfiguring advertising. */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7052 
7053 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
7054 					u16 opcode)
7055 {
7056 	struct mgmt_pending_cmd *cmd;
7057 	struct mgmt_cp_remove_advertising *cp;
7058 	struct mgmt_rp_remove_advertising rp;
7059 
7060 	bt_dev_dbg(hdev, "status %d", status);
7061 
7062 	hci_dev_lock(hdev);
7063 
7064 	/* A failure status here only means that we failed to disable
7065 	 * advertising. Otherwise, the advertising instance has been removed,
7066 	 * so report success.
7067 	 */
7068 	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
7069 	if (!cmd)
7070 		goto unlock;
7071 
7072 	cp = cmd->param;
7073 	rp.instance = cp->instance;
7074 
7075 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
7076 			  &rp, sizeof(rp));
7077 	mgmt_pending_remove(cmd);
7078 
7079 unlock:
7080 	hci_dev_unlock(hdev);
7081 }
7082 
/* Handler for MGMT_OP_REMOVE_ADVERTISING: remove one advertising instance
 * (or all, when cp->instance is 0). The reply is sent immediately when no
 * HCI traffic is needed, otherwise from remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must exist; instance 0 means "all". */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while another operation is reconfiguring advertising. */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* This removes the instance(s) immediately and may queue HCI
	 * commands onto req to update the controller.
	 */
	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7155 
7156 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
7157 			     void *data, u16 data_len)
7158 {
7159 	struct mgmt_cp_get_adv_size_info *cp = data;
7160 	struct mgmt_rp_get_adv_size_info rp;
7161 	u32 flags, supported_flags;
7162 	int err;
7163 
7164 	bt_dev_dbg(hdev, "sock %p", sk);
7165 
7166 	if (!lmp_le_capable(hdev))
7167 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7168 				       MGMT_STATUS_REJECTED);
7169 
7170 	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
7171 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7172 				       MGMT_STATUS_INVALID_PARAMS);
7173 
7174 	flags = __le32_to_cpu(cp->flags);
7175 
7176 	/* The current implementation only supports a subset of the specified
7177 	 * flags.
7178 	 */
7179 	supported_flags = get_supported_adv_flags(hdev);
7180 	if (flags & ~supported_flags)
7181 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7182 				       MGMT_STATUS_INVALID_PARAMS);
7183 
7184 	rp.instance = cp->instance;
7185 	rp.flags = cp->flags;
7186 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
7187 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
7188 
7189 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7190 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7191 
7192 	return err;
7193 }
7194 
/* Dispatch table for mgmt commands: the array index corresponds to the
 * command opcode (entry 0x0000 is unused). Each entry gives the handler,
 * the expected parameter size (a minimum when HCI_MGMT_VAR_LEN is set) and
 * optional HCI_MGMT_* dispatch flags evaluated by the mgmt socket layer.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_security_info,      MGMT_READ_SECURITY_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
};
7301 
7302 void mgmt_index_added(struct hci_dev *hdev)
7303 {
7304 	struct mgmt_ev_ext_index ev;
7305 
7306 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7307 		return;
7308 
7309 	switch (hdev->dev_type) {
7310 	case HCI_PRIMARY:
7311 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7312 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7313 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7314 			ev.type = 0x01;
7315 		} else {
7316 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7317 					 HCI_MGMT_INDEX_EVENTS);
7318 			ev.type = 0x00;
7319 		}
7320 		break;
7321 	case HCI_AMP:
7322 		ev.type = 0x02;
7323 		break;
7324 	default:
7325 		return;
7326 	}
7327 
7328 	ev.bus = hdev->bus;
7329 
7330 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7331 			 HCI_MGMT_EXT_INDEX_EVENTS);
7332 }
7333 
7334 void mgmt_index_removed(struct hci_dev *hdev)
7335 {
7336 	struct mgmt_ev_ext_index ev;
7337 	u8 status = MGMT_STATUS_INVALID_INDEX;
7338 
7339 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7340 		return;
7341 
7342 	switch (hdev->dev_type) {
7343 	case HCI_PRIMARY:
7344 		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7345 
7346 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7347 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7348 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7349 			ev.type = 0x01;
7350 		} else {
7351 			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7352 					 HCI_MGMT_INDEX_EVENTS);
7353 			ev.type = 0x00;
7354 		}
7355 		break;
7356 	case HCI_AMP:
7357 		ev.type = 0x02;
7358 		break;
7359 	default:
7360 		return;
7361 	}
7362 
7363 	ev.bus = hdev->bus;
7364 
7365 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7366 			 HCI_MGMT_EXT_INDEX_EVENTS);
7367 }
7368 
7369 /* This function requires the caller holds hdev->lock */
7370 static void restart_le_actions(struct hci_dev *hdev)
7371 {
7372 	struct hci_conn_params *p;
7373 
7374 	list_for_each_entry(p, &hdev->le_conn_params, list) {
7375 		/* Needed for AUTO_OFF case where might not "really"
7376 		 * have been powered off.
7377 		 */
7378 		list_del_init(&p->action);
7379 
7380 		switch (p->auto_connect) {
7381 		case HCI_AUTO_CONN_DIRECT:
7382 		case HCI_AUTO_CONN_ALWAYS:
7383 			list_add(&p->action, &hdev->pend_le_conns);
7384 			break;
7385 		case HCI_AUTO_CONN_REPORT:
7386 			list_add(&p->action, &hdev->pend_le_reports);
7387 			break;
7388 		default:
7389 			break;
7390 		}
7391 	}
7392 }
7393 
/* Called when a power-on attempt finishes. On success, re-arm the LE
 * auto-connect lists and background scanning; in all cases answer every
 * pending MGMT_OP_SET_POWERED command and emit New Settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	/* settings_rsp stashes one requester socket in match.sk (with a
	 * reference) so it can be released below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
7416 
/* Called when the controller has been powered off: answer pending
 * SET_POWERED commands, fail all other pending commands with an
 * appropriate status, and notify listeners that the class of device is
 * now cleared.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches every remaining pending command. */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Powering off resets the class of device; only notify when it was
	 * actually non-zero before.
	 */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7450 
7451 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7452 {
7453 	struct mgmt_pending_cmd *cmd;
7454 	u8 status;
7455 
7456 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7457 	if (!cmd)
7458 		return;
7459 
7460 	if (err == -ERFKILL)
7461 		status = MGMT_STATUS_RFKILLED;
7462 	else
7463 		status = MGMT_STATUS_FAILED;
7464 
7465 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7466 
7467 	mgmt_pending_remove(cmd);
7468 }
7469 
/* Emit a New Link Key event for a BR/EDR link key, with @persistent
 * indicating whether userspace should store the key.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	/* Zero the whole event (including any padding) before filling it */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
7486 
7487 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7488 {
7489 	switch (ltk->type) {
7490 	case SMP_LTK:
7491 	case SMP_LTK_SLAVE:
7492 		if (ltk->authenticated)
7493 			return MGMT_LTK_AUTHENTICATED;
7494 		return MGMT_LTK_UNAUTHENTICATED;
7495 	case SMP_LTK_P256:
7496 		if (ltk->authenticated)
7497 			return MGMT_LTK_P256_AUTH;
7498 		return MGMT_LTK_P256_UNAUTH;
7499 	case SMP_LTK_P256_DEBUG:
7500 		return MGMT_LTK_P256_DEBUG;
7501 	}
7502 
7503 	return MGMT_LTK_UNAUTHENTICATED;
7504 }
7505 
/* Emit a New Long Term Key event. The store hint is suppressed for
 * non-identity random addresses since such keys cannot be reused.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Only the central/master form of the key sets the master flag */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
7548 
/* Emit a New Identity Resolving Key event carrying the remote's RPA,
 * identity address and IRK value.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
7564 
/* Emit a New Connection Signature Resolving Key event. As with LTKs,
 * the store hint is suppressed for non-identity random addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
7594 
/* Emit a New Connection Parameter event for an identity address,
 * converting the interval/latency/timeout values to little endian.
 * Non-identity addresses are ignored since parameters stored against
 * them would be useless once the address changes.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
7615 
/* Emit a Device Connected event. The variable-length EIR payload is
 * either the remote's LE advertising data or, for BR/EDR, its name and
 * class of device.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* Event header plus room for the EIR payload built below */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	/* Send only the used portion of the buffer */
	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
7652 
/* mgmt_pending_foreach() callback: complete a pending Disconnect
 * command and hand its socket (with a reference held) back to the
 * caller through @data so the final event can be skipped for it.
 */
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	/* Grab a reference before the command (and its socket ref)
	 * is removed below.
	 */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
7664 
/* mgmt_pending_foreach() callback: notify listeners that the device
 * targeted by a pending Unpair Device command is unpaired, then
 * complete and remove the command. @data is the hci_dev.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
7675 
7676 bool mgmt_powering_down(struct hci_dev *hdev)
7677 {
7678 	struct mgmt_pending_cmd *cmd;
7679 	struct mgmt_mode *cp;
7680 
7681 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7682 	if (!cmd)
7683 		return false;
7684 
7685 	cp = cmd->param;
7686 	if (!cp->val)
7687 		return true;
7688 
7689 	return false;
7690 }
7691 
/* Emit a Device Disconnected event and complete any pending Disconnect
 * and Unpair Device commands for this device. May also expedite a
 * queued power-off when this was the last connection.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report connections that were announced over mgmt */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the issuing socket in sk (ref held) so
	 * the event below is not echoed back to it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
7727 
7728 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7729 			    u8 link_type, u8 addr_type, u8 status)
7730 {
7731 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7732 	struct mgmt_cp_disconnect *cp;
7733 	struct mgmt_pending_cmd *cmd;
7734 
7735 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7736 			     hdev);
7737 
7738 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7739 	if (!cmd)
7740 		return;
7741 
7742 	cp = cmd->param;
7743 
7744 	if (bacmp(bdaddr, &cp->addr.bdaddr))
7745 		return;
7746 
7747 	if (cp->addr.type != bdaddr_type)
7748 		return;
7749 
7750 	cmd->cmd_complete(cmd, mgmt_status(status));
7751 	mgmt_pending_remove(cmd);
7752 }
7753 
/* Emit a Connect Failed event. May also expedite a queued power-off
 * when the failing connection was the last one standing in its way.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
7773 
/* Emit a PIN Code Request event for a BR/EDR peer. @secure indicates
 * whether a 16 digit PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
7784 
7785 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7786 				  u8 status)
7787 {
7788 	struct mgmt_pending_cmd *cmd;
7789 
7790 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7791 	if (!cmd)
7792 		return;
7793 
7794 	cmd->cmd_complete(cmd, mgmt_status(status));
7795 	mgmt_pending_remove(cmd);
7796 }
7797 
7798 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7799 				      u8 status)
7800 {
7801 	struct mgmt_pending_cmd *cmd;
7802 
7803 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7804 	if (!cmd)
7805 		return;
7806 
7807 	cmd->cmd_complete(cmd, mgmt_status(status));
7808 	mgmt_pending_remove(cmd);
7809 }
7810 
/* Emit a User Confirmation Request event carrying the numeric value to
 * confirm. @confirm_hint signals whether a simple yes/no confirmation
 * suffices. Returns the mgmt_event() result.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
7827 
/* Emit a User Passkey Request event asking userspace to supply a
 * passkey for the given peer. Returns the mgmt_event() result.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
7841 
7842 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7843 				      u8 link_type, u8 addr_type, u8 status,
7844 				      u8 opcode)
7845 {
7846 	struct mgmt_pending_cmd *cmd;
7847 
7848 	cmd = pending_find(opcode, hdev);
7849 	if (!cmd)
7850 		return -ENOENT;
7851 
7852 	cmd->cmd_complete(cmd, mgmt_status(status));
7853 	mgmt_pending_remove(cmd);
7854 
7855 	return 0;
7856 }
7857 
/* Complete a pending User Confirmation Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
7864 
/* Complete a pending User Confirmation Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
7872 
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
7879 
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
7887 
/* Emit a Passkey Notify event showing the passkey to display and the
 * keypress-entered state. Returns the mgmt_event() result.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
7903 
/* Emit an Authentication Failed event for @conn and, if a pairing
 * command is pending for it, complete that command with the same
 * status (skipping the event for the issuing socket).
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Don't echo the event to the socket whose command failed */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
7924 
/* Handle completion of an authentication-enable change: sync the
 * HCI_LINK_SECURITY flag with the controller's HCI_AUTH state, answer
 * pending Set Link Security commands and broadcast new settings when
 * the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* On failure just fail the pending commands */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's auth state into the mgmt flag and
	 * remember whether that was an actual change.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7951 
/* Queue a Write Extended Inquiry Response command that zeroes the EIR
 * data, and clear the cached copy in hdev->eir. No-op on controllers
 * without extended inquiry support.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
7966 
/* Handle completion of an SSP enable/disable change: sync the
 * HCI_SSP_ENABLED (and dependent HCI_HS_ENABLED) flags, answer pending
 * Set SSP commands, broadcast new settings on change, and update or
 * clear the EIR data to match the new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the optimistically set flag
		 * (and HS, which depends on SSP) and announce the revert.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables High Speed; report a change
		 * if either flag was actually cleared.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8019 
8020 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8021 {
8022 	struct cmd_lookup *match = data;
8023 
8024 	if (match->sk == NULL) {
8025 		match->sk = cmd->sk;
8026 		sock_hold(match->sk);
8027 	}
8028 }
8029 
/* Handle completion of a class of device update triggered by Set Device
 * Class, Add UUID or Remove UUID: on success broadcast the new class,
 * skipping the socket that initiated the change.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Find the socket of whichever command caused this update */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
8048 
/* Handle completion of a local name change: broadcast a Local Name
 * Changed event unless the change stems from the power-on sequence.
 * @name is expected to be at least HCI_MAX_NAME_LENGTH bytes.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command caused this, so cache the name locally */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the issuing socket (if any) when broadcasting */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
8076 
8077 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8078 {
8079 	int i;
8080 
8081 	for (i = 0; i < uuid_count; i++) {
8082 		if (!memcmp(uuid, uuids[i], 16))
8083 			return true;
8084 	}
8085 
8086 	return false;
8087 }
8088 
/* Walk the EIR/AD structures in @eir (each [length][type][data...],
 * where length counts the type byte plus data) and return true if any
 * advertised 16/32/128-bit service UUID matches an entry in @uuids.
 * 16 and 32-bit UUIDs are expanded against the Bluetooth base UUID
 * before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field rather than reading past it */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Little-endian 16-bit UUIDs land in bytes 12-13 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Little-endian 32-bit UUIDs land in bytes 12-15 */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared as-is */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + field_len bytes) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
8143 
/* Schedule a restart of an active LE scan so strict duplicate filtering
 * reports fresh RSSI values. Skipped when not scanning or when the
 * restart would land past the end of the current scan window.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
8158 
/* Apply the active service-discovery filters (RSSI threshold and UUID
 * list) to a scan result. Returns true when the result should be
 * reported to userspace. May schedule an LE scan restart when the
 * strict-duplicate-filter quirk is set.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
8203 
/* Emit a Device Found event for a discovery or passive-scan result,
 * after applying the active filters (service discovery, limited
 * discoverable). The EIR payload combines advertising/EIR data, an
 * optional appended class of device, and any scan response data.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* For LE, look for the limited flag in the AD Flags */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device unless the EIR already carries one */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8285 
/* Emit a Device Found event carrying only the remote device's name
 * (as an EIR Complete Name field), used when a name is resolved after
 * the initial discovery result.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	/* Header plus the largest name field (length + type + name) */
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
8308 
/* Emit a Discovering event reflecting the current discovery type and
 * the new on/off state.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
8321 
/* Registration descriptor binding the mgmt command handlers to the
 * HCI control channel.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8328 
/* Register the management interface on the HCI control channel. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
8333 
/* Unregister the management interface from the HCI control channel. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
8338