xref: /openbmc/linux/net/bluetooth/mgmt.c (revision 5e23a35c)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34 
35 #include "hci_request.h"
36 #include "smp.h"
37 
38 #define MGMT_VERSION	1
39 #define MGMT_REVISION	8
40 
/* List of mgmt commands supported by this kernel and advertised to
 * user space through the MGMT_OP_READ_COMMANDS reply.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
};
99 
/* List of mgmt events this kernel may emit, advertised to user space
 * through the MGMT_OP_READ_COMMANDS reply (after the command list).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
131 
132 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
133 
134 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
135 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
136 
/* Tracks a mgmt command whose reply is deferred until the matching
 * HCI activity completes; queued on hdev->mgmt_pending.
 */
struct pending_cmd {
	struct list_head list;	/* entry in hdev->mgmt_pending */
	u16 opcode;		/* mgmt opcode being processed */
	int index;		/* controller index the command targets */
	void *param;		/* copy of the command parameters */
	size_t param_len;	/* length of param in bytes */
	struct sock *sk;	/* socket that issued the command */
	void *user_data;	/* opaque per-command context */
	/* called to send the final response for this command */
	int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
};
147 
/* HCI to MGMT error code conversion table, indexed directly by the
 * HCI status code (entry N converts HCI status N).
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
212 
213 static u8 mgmt_status(u8 hci_status)
214 {
215 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
216 		return mgmt_status_table[hci_status];
217 
218 	return MGMT_STATUS_FAILED;
219 }
220 
/* Broadcast a mgmt event to all HCI control sockets, optionally
 * skipping one socket (typically the originator of the command that
 * triggered the event).
 *
 * Returns 0 on success or -ENOMEM if the event skb cannot be
 * allocated.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	/* A NULL hdev means the event is not tied to a controller */
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
250 
251 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
252 {
253 	struct sk_buff *skb;
254 	struct mgmt_hdr *hdr;
255 	struct mgmt_ev_cmd_status *ev;
256 	int err;
257 
258 	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
259 
260 	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
261 	if (!skb)
262 		return -ENOMEM;
263 
264 	hdr = (void *) skb_put(skb, sizeof(*hdr));
265 
266 	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
267 	hdr->index = cpu_to_le16(index);
268 	hdr->len = cpu_to_le16(sizeof(*ev));
269 
270 	ev = (void *) skb_put(skb, sizeof(*ev));
271 	ev->status = status;
272 	ev->opcode = cpu_to_le16(cmd);
273 
274 	err = sock_queue_rcv_skb(sk, skb);
275 	if (err < 0)
276 		kfree_skb(skb);
277 
278 	return err;
279 }
280 
281 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
282 			void *rp, size_t rp_len)
283 {
284 	struct sk_buff *skb;
285 	struct mgmt_hdr *hdr;
286 	struct mgmt_ev_cmd_complete *ev;
287 	int err;
288 
289 	BT_DBG("sock %p", sk);
290 
291 	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
292 	if (!skb)
293 		return -ENOMEM;
294 
295 	hdr = (void *) skb_put(skb, sizeof(*hdr));
296 
297 	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
298 	hdr->index = cpu_to_le16(index);
299 	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
300 
301 	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
302 	ev->opcode = cpu_to_le16(cmd);
303 	ev->status = status;
304 
305 	if (rp)
306 		memcpy(ev->data, rp, rp_len);
307 
308 	err = sock_queue_rcv_skb(sk, skb);
309 	if (err < 0)
310 		kfree_skb(skb);
311 
312 	return err;
313 }
314 
315 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
316 			u16 data_len)
317 {
318 	struct mgmt_rp_read_version rp;
319 
320 	BT_DBG("sock %p", sk);
321 
322 	rp.version = MGMT_VERSION;
323 	rp.revision = cpu_to_le16(MGMT_REVISION);
324 
325 	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
326 			    sizeof(rp));
327 }
328 
329 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
330 			 u16 data_len)
331 {
332 	struct mgmt_rp_read_commands *rp;
333 	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
334 	const u16 num_events = ARRAY_SIZE(mgmt_events);
335 	__le16 *opcode;
336 	size_t rp_size;
337 	int i, err;
338 
339 	BT_DBG("sock %p", sk);
340 
341 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
342 
343 	rp = kmalloc(rp_size, GFP_KERNEL);
344 	if (!rp)
345 		return -ENOMEM;
346 
347 	rp->num_commands = cpu_to_le16(num_commands);
348 	rp->num_events = cpu_to_le16(num_events);
349 
350 	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
351 		put_unaligned_le16(mgmt_commands[i], opcode);
352 
353 	for (i = 0; i < num_events; i++, opcode++)
354 		put_unaligned_le16(mgmt_events[i], opcode);
355 
356 	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
357 			   rp_size);
358 	kfree(rp);
359 
360 	return err;
361 }
362 
/* Handler for MGMT_OP_READ_INDEX_LIST: report the ids of all
 * configured BR/EDR controllers to user space.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the reply allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: hci_dev_list_lock is held, must not sleep */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes. Extra filters are applied
	 * here, so the final count can only be lower than (or equal
	 * to) the first pass count — the allocation stays sufficient.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* Recompute the reply length from the actual count */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
422 
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: report the ids of all
 * BR/EDR controllers that still require configuration. Mirrors
 * read_index_list() but selects HCI_UNCONFIGURED devices instead.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the reply allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: hci_dev_list_lock is held, must not sleep */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes with extra filters; the
	 * final count never exceeds the first pass count.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
482 
483 static bool is_configured(struct hci_dev *hdev)
484 {
485 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
486 	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
487 		return false;
488 
489 	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
490 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
491 		return false;
492 
493 	return true;
494 }
495 
496 static __le32 get_missing_options(struct hci_dev *hdev)
497 {
498 	u32 options = 0;
499 
500 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
501 	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
502 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
503 
504 	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
505 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
506 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
507 
508 	return cpu_to_le32(options);
509 }
510 
511 static int new_options(struct hci_dev *hdev, struct sock *skip)
512 {
513 	__le32 options = get_missing_options(hdev);
514 
515 	return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
516 			  sizeof(options), skip);
517 }
518 
519 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
520 {
521 	__le32 options = get_missing_options(hdev);
522 
523 	return cmd_complete(sk, hdev->id, opcode, 0, &options,
524 			    sizeof(options));
525 }
526 
527 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
528 			    void *data, u16 data_len)
529 {
530 	struct mgmt_rp_read_config_info rp;
531 	u32 options = 0;
532 
533 	BT_DBG("sock %p %s", sk, hdev->name);
534 
535 	hci_dev_lock(hdev);
536 
537 	memset(&rp, 0, sizeof(rp));
538 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
539 
540 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
541 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
542 
543 	if (hdev->set_bdaddr)
544 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
545 
546 	rp.supported_options = cpu_to_le32(options);
547 	rp.missing_options = get_missing_options(hdev);
548 
549 	hci_dev_unlock(hdev);
550 
551 	return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
552 			    sizeof(rp));
553 }
554 
555 static u32 get_supported_settings(struct hci_dev *hdev)
556 {
557 	u32 settings = 0;
558 
559 	settings |= MGMT_SETTING_POWERED;
560 	settings |= MGMT_SETTING_BONDABLE;
561 	settings |= MGMT_SETTING_DEBUG_KEYS;
562 	settings |= MGMT_SETTING_CONNECTABLE;
563 	settings |= MGMT_SETTING_DISCOVERABLE;
564 
565 	if (lmp_bredr_capable(hdev)) {
566 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
567 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
568 		settings |= MGMT_SETTING_BREDR;
569 		settings |= MGMT_SETTING_LINK_SECURITY;
570 
571 		if (lmp_ssp_capable(hdev)) {
572 			settings |= MGMT_SETTING_SSP;
573 			settings |= MGMT_SETTING_HS;
574 		}
575 
576 		if (lmp_sc_capable(hdev))
577 			settings |= MGMT_SETTING_SECURE_CONN;
578 	}
579 
580 	if (lmp_le_capable(hdev)) {
581 		settings |= MGMT_SETTING_LE;
582 		settings |= MGMT_SETTING_ADVERTISING;
583 		settings |= MGMT_SETTING_SECURE_CONN;
584 		settings |= MGMT_SETTING_PRIVACY;
585 	}
586 
587 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
588 	    hdev->set_bdaddr)
589 		settings |= MGMT_SETTING_CONFIGURATION;
590 
591 	return settings;
592 }
593 
594 static u32 get_current_settings(struct hci_dev *hdev)
595 {
596 	u32 settings = 0;
597 
598 	if (hdev_is_powered(hdev))
599 		settings |= MGMT_SETTING_POWERED;
600 
601 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
602 		settings |= MGMT_SETTING_CONNECTABLE;
603 
604 	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
605 		settings |= MGMT_SETTING_FAST_CONNECTABLE;
606 
607 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
608 		settings |= MGMT_SETTING_DISCOVERABLE;
609 
610 	if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
611 		settings |= MGMT_SETTING_BONDABLE;
612 
613 	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
614 		settings |= MGMT_SETTING_BREDR;
615 
616 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
617 		settings |= MGMT_SETTING_LE;
618 
619 	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
620 		settings |= MGMT_SETTING_LINK_SECURITY;
621 
622 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
623 		settings |= MGMT_SETTING_SSP;
624 
625 	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
626 		settings |= MGMT_SETTING_HS;
627 
628 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
629 		settings |= MGMT_SETTING_ADVERTISING;
630 
631 	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
632 		settings |= MGMT_SETTING_SECURE_CONN;
633 
634 	if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
635 		settings |= MGMT_SETTING_DEBUG_KEYS;
636 
637 	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
638 		settings |= MGMT_SETTING_PRIVACY;
639 
640 	return settings;
641 }
642 
643 #define PNP_INFO_SVCLASS_ID		0x1200
644 
/* Append an EIR structure listing the registered 16-bit service
 * UUIDs to @data (at most @len bytes available). Returns a pointer
 * just past the written data, or @data unchanged if nothing fits or
 * no eligible UUIDs exist.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus at least one UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit short form lives at bytes 12-13 of the
		 * stored 128-bit UUID.
		 */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the EIR header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
686 
/* Append an EIR structure listing the registered 32-bit service
 * UUIDs to @data (at most @len bytes available). Returns a pointer
 * just past the written data.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus at least one UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the EIR header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit short form lives at bytes 12-15 of the
		 * stored 128-bit UUID.
		 */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
719 
/* Append an EIR structure listing the registered 128-bit service
 * UUIDs to @data (at most @len bytes available). Returns a pointer
 * just past the written data.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus one full 16-byte UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the EIR header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
752 
753 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
754 {
755 	struct pending_cmd *cmd;
756 
757 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
758 		if (cmd->opcode == opcode)
759 			return cmd;
760 	}
761 
762 	return NULL;
763 }
764 
765 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
766 						  struct hci_dev *hdev,
767 						  const void *data)
768 {
769 	struct pending_cmd *cmd;
770 
771 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
772 		if (cmd->user_data != data)
773 			continue;
774 		if (cmd->opcode == opcode)
775 			return cmd;
776 	}
777 
778 	return NULL;
779 }
780 
/* Build LE scan response data containing the local name (complete,
 * or shortened if it does not fit). Returns the number of bytes
 * written to @ptr.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve 2 bytes for the EIR length and type fields
		 * (ad_len is still 0 at this point).
		 */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR length byte covers the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		/* NOTE(review): this ptr advance is currently unused —
		 * presumably kept so further fields can be appended.
		 */
		ptr += (name_len + 2);
	}

	return ad_len;
}
806 
/* Queue an HCI_OP_LE_SET_SCAN_RSP_DATA command on @req if the freshly
 * built scan response data differs from the cached copy.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Nothing to do if the controller already has this data */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache the full zero-padded buffer, not just len bytes */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
831 
832 static u8 get_adv_discov_flags(struct hci_dev *hdev)
833 {
834 	struct pending_cmd *cmd;
835 
836 	/* If there's a pending mgmt command the flags will not yet have
837 	 * their final values, so check for this first.
838 	 */
839 	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
840 	if (cmd) {
841 		struct mgmt_mode *cp = cmd->param;
842 		if (cp->val == 0x01)
843 			return LE_AD_GENERAL;
844 		else if (cp->val == 0x02)
845 			return LE_AD_LIMITED;
846 	} else {
847 		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
848 			return LE_AD_LIMITED;
849 		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
850 			return LE_AD_GENERAL;
851 	}
852 
853 	return 0;
854 }
855 
/* Build LE advertising data consisting of an optional flags field
 * (discoverability plus BR/EDR support) and an optional TX power
 * field. Returns the number of bytes written to @ptr.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* The flags field is only emitted when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
887 
/* Queue an HCI_OP_LE_SET_ADV_DATA command on @req if the freshly
 * built advertising data differs from the cached copy.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Nothing to do if the controller already has this data */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the full zero-padded buffer, not just len bytes */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
912 
913 int mgmt_update_adv_data(struct hci_dev *hdev)
914 {
915 	struct hci_request req;
916 
917 	hci_req_init(&req, hdev);
918 	update_adv_data(&req);
919 
920 	return hci_req_run(&req, NULL);
921 }
922 
/* Build the extended inquiry response (EIR) buffer: local name,
 * optional TX power, optional Device ID, followed by the 16/32/128
 * bit service UUID lists (each truncated to the remaining space).
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* devid_source == 0 means no Device ID has been configured */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* UUID lists get whatever space is left in the EIR buffer */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
970 
/* Queue an HCI_OP_WRITE_EIR command on @req if the freshly built EIR
 * data differs from the cached copy. Skipped unless the controller
 * is powered, supports extended inquiry, has SSP enabled and the
 * service cache is not active.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	/* While the service cache is active EIR updates are deferred
	 * (see service_cache_off).
	 */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Nothing to do if the controller already has this data */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
999 
1000 static u8 get_service_classes(struct hci_dev *hdev)
1001 {
1002 	struct bt_uuid *uuid;
1003 	u8 val = 0;
1004 
1005 	list_for_each_entry(uuid, &hdev->uuids, list)
1006 		val |= uuid->svc_hint;
1007 
1008 	return val;
1009 }
1010 
/* Queue an HCI_OP_WRITE_CLASS_OF_DEV command on @req if the computed
 * Class of Device differs from the controller's current one.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Deferred while the service cache is active */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited discoverable mode sets bit 13 of the Class of Device */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
1039 
1040 static bool get_connectable(struct hci_dev *hdev)
1041 {
1042 	struct pending_cmd *cmd;
1043 
1044 	/* If there's a pending mgmt command the flag will not yet have
1045 	 * it's final value, so check for this first.
1046 	 */
1047 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1048 	if (cmd) {
1049 		struct mgmt_mode *cp = cmd->param;
1050 		return cp->val;
1051 	}
1052 
1053 	return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1054 }
1055 
1056 static void disable_advertising(struct hci_request *req)
1057 {
1058 	u8 enable = 0x00;
1059 
1060 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1061 }
1062 
/* Queue the HCI commands that (re)enable LE advertising: set the
 * advertising parameters, then turn advertising on. Skipped while an
 * LE connection exists.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* Restarting requires turning advertising off first */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1103 
/* Delayed work handler that expires the service cache: once the
 * HCI_SERVICE_CACHE flag is cleared, push the deferred EIR and Class
 * of Device updates to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act on the transition from cached to uncached */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1124 
/* Delayed work handler: mark the Resolvable Private Address as expired
 * and, if advertising is enabled, restart it so a fresh RPA gets
 * generated and programmed into the controller.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Nothing to refresh immediately if we are not advertising */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1145 
/* One-time per-device setup performed when the first mgmt command
 * arrives; the HCI_MGMT flag marks the device as mgmt-controlled.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* test_and_set guarantees this runs only once per device */
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_BONDABLE, &hdev->dev_flags);
}
1161 
/* MGMT_OP_READ_INFO handler: report the controller's address, HCI
 * version, manufacturer, class of device, names, and the supported
 * plus current settings bitmasks.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	/* Class of Device is always exactly 3 bytes */
	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
1191 
/* Release a pending command: drop the socket reference taken at add
 * time and free the copied parameters and the command itself.
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
1198 
1199 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1200 					    struct hci_dev *hdev, void *data,
1201 					    u16 len)
1202 {
1203 	struct pending_cmd *cmd;
1204 
1205 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1206 	if (!cmd)
1207 		return NULL;
1208 
1209 	cmd->opcode = opcode;
1210 	cmd->index = hdev->id;
1211 
1212 	cmd->param = kmemdup(data, len, GFP_KERNEL);
1213 	if (!cmd->param) {
1214 		kfree(cmd);
1215 		return NULL;
1216 	}
1217 
1218 	cmd->param_len = len;
1219 
1220 	cmd->sk = sk;
1221 	sock_hold(sk);
1222 
1223 	list_add(&cmd->list, &hdev->mgmt_pending);
1224 
1225 	return cmd;
1226 }
1227 
1228 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1229 				 void (*cb)(struct pending_cmd *cmd,
1230 					    void *data),
1231 				 void *data)
1232 {
1233 	struct pending_cmd *cmd, *tmp;
1234 
1235 	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1236 		if (opcode > 0 && cmd->opcode != opcode)
1237 			continue;
1238 
1239 		cb(cmd, data);
1240 	}
1241 }
1242 
/* Unlink a pending command from its device's list and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1248 
1249 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1250 {
1251 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1252 
1253 	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1254 			    sizeof(settings));
1255 }
1256 
1257 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1258 {
1259 	BT_DBG("%s status 0x%02x", hdev->name, status);
1260 
1261 	if (hci_conn_count(hdev) == 0) {
1262 		cancel_delayed_work(&hdev->power_off);
1263 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
1264 	}
1265 }
1266 
/* Queue the HCI commands needed to abort whatever discovery activity
 * is currently running (inquiry, LE scan or name resolution).
 * Returns true if any stop command was queued, meaning the discovery
 * state will need updating once the request completes.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Either a BR/EDR inquiry or an LE scan is in progress */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Abort the outstanding remote name request, if any */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1308 
/* Build and run an HCI request that quiesces the controller before
 * powering off: disable page/inquiry scanning and advertising, stop
 * discovery, and disconnect, cancel or reject every known connection.
 * NOTE(review): iterates conn_hash without extra locking — assumes the
 * hdev lock held by the caller (set_powered) suffices here; confirm.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established links get a proper disconnect */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection attempts get cancelled */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection requests get rejected */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1367 
/* MGMT_OP_SET_POWERED handler: power the controller on or off. The
 * response is sent from the power on/off completion paths via the
 * pending command queued here.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power toggle may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Device was auto-powered (e.g. during init): cancel the pending
	 * auto power-off; if powering on, just confirm the current state.
	 * NOTE(review): mgmt_pending_add return value is not checked
	 * here — on allocation failure no response would be queued;
	 * confirm whether that is acceptable.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: reply immediately */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1433 
1434 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1435 {
1436 	__le32 ev;
1437 
1438 	ev = cpu_to_le32(get_current_settings(hdev));
1439 
1440 	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1441 }
1442 
/* Public wrapper: broadcast New Settings to all mgmt sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1447 
/* Context for mgmt_pending_foreach() callbacks such as settings_rsp():
 * records the first responder's socket (with a reference held) so the
 * caller can skip it when broadcasting a follow-up event.
 */
struct cmd_lookup {
	struct sock *sk;	/* first socket responded to, or NULL */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1453 
1454 static void settings_rsp(struct pending_cmd *cmd, void *data)
1455 {
1456 	struct cmd_lookup *match = data;
1457 
1458 	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1459 
1460 	list_del(&cmd->list);
1461 
1462 	if (match->sk == NULL) {
1463 		match->sk = cmd->sk;
1464 		sock_hold(match->sk);
1465 	}
1466 
1467 	mgmt_pending_free(cmd);
1468 }
1469 
1470 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1471 {
1472 	u8 *status = data;
1473 
1474 	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1475 	mgmt_pending_remove(cmd);
1476 }
1477 
1478 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1479 {
1480 	if (cmd->cmd_complete) {
1481 		u8 *status = data;
1482 
1483 		cmd->cmd_complete(cmd, *status);
1484 		mgmt_pending_remove(cmd);
1485 
1486 		return;
1487 	}
1488 
1489 	cmd_status_rsp(cmd, data);
1490 }
1491 
/* Default cmd_complete handler: echo the original command parameters
 * back in the Command Complete event.
 */
static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
			    cmd->param, cmd->param_len);
}
1497 
/* cmd_complete handler for commands whose parameters begin with a
 * struct mgmt_addr_info: only that address portion is echoed back.
 */
static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
			    sizeof(struct mgmt_addr_info));
}
1503 
1504 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1505 {
1506 	if (!lmp_bredr_capable(hdev))
1507 		return MGMT_STATUS_NOT_SUPPORTED;
1508 	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1509 		return MGMT_STATUS_REJECTED;
1510 	else
1511 		return MGMT_STATUS_SUCCESS;
1512 }
1513 
1514 static u8 mgmt_le_support(struct hci_dev *hdev)
1515 {
1516 	if (!lmp_le_capable(hdev))
1517 		return MGMT_STATUS_NOT_SUPPORTED;
1518 	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1519 		return MGMT_STATUS_REJECTED;
1520 	else
1521 		return MGMT_STATUS_SUCCESS;
1522 }
1523 
/* HCI request completion handler for Set Discoverable: update the
 * HCI_DISCOVERABLE flag, arm the discoverable timeout, answer the
 * pending command and broadcast New Settings if anything changed.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	/* On failure, propagate the error and undo the limited flag set
	 * optimistically in set_discoverable().
	 */
	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timeout now that discoverable mode is active */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1583 
/* MGMT_OP_SET_DISCOVERABLE handler. cp->val is 0x00 (off), 0x01
 * (general discoverable) or 0x02 (limited discoverable, which requires
 * a timeout). Programs IAC and scan enable for BR/EDR, or just updates
 * advertising data for LE-only controllers; the final flag updates
 * happen in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* Powered off: just toggle the flag, no HCI traffic possible */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		/* Command length is 1 (num_iac) + 3 bytes per LAP */
		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1748 
/* Queue page scan activity/type updates for fast-connectable mode:
 * interlaced scanning with a short interval when enabled, the standard
 * defaults otherwise. Commands are only queued when the values would
 * actually change. Requires BR/EDR and HCI version 1.2 or later.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan type/activity tuning needs HCI 1.2 or later */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	/* Only send commands for values that actually change */
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1783 
/* HCI request completion handler for Set Connectable: update the
 * HCI_CONNECTABLE (and, when disabling, HCI_DISCOVERABLE) flags,
 * answer the pending command, and refresh dependent state (page scan,
 * advertising data, background scan) if anything changed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		/* Disabling connectable also disables discoverable */
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1833 
1834 static int set_connectable_update_settings(struct hci_dev *hdev,
1835 					   struct sock *sk, u8 val)
1836 {
1837 	bool changed = false;
1838 	int err;
1839 
1840 	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1841 		changed = true;
1842 
1843 	if (val) {
1844 		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1845 	} else {
1846 		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1847 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1848 	}
1849 
1850 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1851 	if (err < 0)
1852 		return err;
1853 
1854 	if (changed) {
1855 		hci_update_page_scan(hdev);
1856 		hci_update_background_scan(hdev);
1857 		return new_settings(hdev, sk);
1858 	}
1859 
1860 	return 0;
1861 }
1862 
/* MGMT_OP_SET_CONNECTABLE handler: enable or disable connectable mode.
 * For BR/EDR this means page scanning; for LE it affects advertising.
 * When powered off (or no HCI traffic is needed) only the flags are
 * toggled; otherwise set_connectable_complete() finishes the job.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the setting flags can be changed */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			/* Leaving discoverable: stop its timeout */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* ENODATA: no HCI commands were needed, so the pure
		 * flag update path can finish the request instead.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1967 
1968 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1969 			u16 len)
1970 {
1971 	struct mgmt_mode *cp = data;
1972 	bool changed;
1973 	int err;
1974 
1975 	BT_DBG("request for %s", hdev->name);
1976 
1977 	if (cp->val != 0x00 && cp->val != 0x01)
1978 		return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1979 				  MGMT_STATUS_INVALID_PARAMS);
1980 
1981 	hci_dev_lock(hdev);
1982 
1983 	if (cp->val)
1984 		changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1985 	else
1986 		changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1987 
1988 	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1989 	if (err < 0)
1990 		goto unlock;
1991 
1992 	if (changed)
1993 		err = new_settings(hdev, sk);
1994 
1995 unlock:
1996 	hci_dev_unlock(hdev);
1997 	return err;
1998 }
1999 
/* MGMT_OP_SET_LINK_SECURITY handler: toggle BR/EDR link-level security
 * (authentication). When powered, this maps to the HCI Write
 * Authentication Enable command; the response is sent from its
 * completion path via the pending command queued here.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only toggle the flag, no HCI traffic possible */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: done */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2069 
/* MGMT_OP_SET_SSP handler: toggle Secure Simple Pairing. Requires
 * BR/EDR and SSP-capable hardware. Disabling SSP also drops High Speed
 * (which depends on SSP) and SSP debug mode. When powered, the HCI
 * Write Simple Pairing Mode command is sent and the response comes
 * from its completion path via the pending command queued here.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only toggle the flags, no HCI traffic possible */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* HS depends on SSP, so it goes down with it */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: reply immediately */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also turns SSP debug mode off (cp->val is 0x00
	 * here, which is the value the debug mode command takes).
	 */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2151 
/* MGMT_OP_SET_HS handler: toggle High Speed support. This is a pure
 * flag toggle (no HCI traffic), and requires SSP to be enabled.
 * Disabling HS while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2202 
/* HCI request completion handler for Set LE: answer all pending
 * SET_LE commands (as failures on error), broadcast New Settings, and
 * refresh advertising data and background scanning when LE ended up
 * enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending SET_LE command with this status */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	/* match.sk is the first responder; skip it for the broadcast */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}
2242 
/* MGMT_OP_SET_LE handler: enable or disable LE support on the controller
 * via HCI_OP_WRITE_LE_HOST_SUPPORTED. The response is sent immediately
 * when no HCI traffic is needed, otherwise from le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* cp->val is a strict boolean in the mgmt protocol */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* If the controller is powered off, or already in the requested
	 * state, only the stored flags need updating - no HCI traffic.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE also clears the advertising flag */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one LE-state changing operation may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop any ongoing advertising before disabling LE */
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	/* Response is deferred to le_enable_complete() */
	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2331 
2332 /* This is a helper function to test for pending mgmt commands that can
2333  * cause CoD or EIR HCI commands. We can only allow one such pending
2334  * mgmt command at a time since otherwise we cannot easily track what
2335  * the current values are, will be, and based on that calculate if a new
2336  * HCI command needs to be sent and if yes with what value.
2337  */
2338 static bool pending_eir_or_class(struct hci_dev *hdev)
2339 {
2340 	struct pending_cmd *cmd;
2341 
2342 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2343 		switch (cmd->opcode) {
2344 		case MGMT_OP_ADD_UUID:
2345 		case MGMT_OP_REMOVE_UUID:
2346 		case MGMT_OP_SET_DEV_CLASS:
2347 		case MGMT_OP_SET_POWERED:
2348 			return true;
2349 		}
2350 	}
2351 
2352 	return false;
2353 }
2354 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; the first 12 bytes identify UUIDs that
 * have a 16- or 32-bit short form (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2359 
2360 static u8 get_uuid_size(const u8 *uuid)
2361 {
2362 	u32 val;
2363 
2364 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2365 		return 128;
2366 
2367 	val = get_unaligned_le32(&uuid[12]);
2368 	if (val > 0xffff)
2369 		return 32;
2370 
2371 	return 16;
2372 }
2373 
2374 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2375 {
2376 	struct pending_cmd *cmd;
2377 
2378 	hci_dev_lock(hdev);
2379 
2380 	cmd = mgmt_pending_find(mgmt_op, hdev);
2381 	if (!cmd)
2382 		goto unlock;
2383 
2384 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2385 		     hdev->dev_class, 3);
2386 
2387 	mgmt_pending_remove(cmd);
2388 
2389 unlock:
2390 	hci_dev_unlock(hdev);
2391 }
2392 
/* HCI request callback for add_uuid(): complete the pending ADD_UUID */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2399 
/* MGMT_OP_ADD_UUID handler: record a new service UUID and update the
 * Class of Device and EIR data on the controller accordingly.
 *
 * Returns 0 on success (the response is sent either immediately or
 * later from add_uuid_complete()), a negative errno otherwise.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		/* -ENODATA apparently means the request contained no HCI
		 * commands, so the command can be completed right away
		 * with the current device class.
		 */
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	/* Response is deferred until add_uuid_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2457 
2458 static bool enable_service_cache(struct hci_dev *hdev)
2459 {
2460 	if (!hdev_is_powered(hdev))
2461 		return false;
2462 
2463 	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2464 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2465 				   CACHE_TIMEOUT);
2466 		return true;
2467 	}
2468 
2469 	return false;
2470 }
2471 
/* HCI request callback for remove_uuid(): complete the pending REMOVE_UUID */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2478 
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID - or all of them
 * when the all-zero wildcard UUID is given - and update the Class of
 * Device and EIR data on the controller.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* An all-zero UUID acts as a wildcard: clear the entire list */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got armed just now, the controller
		 * update is deferred and the command completes immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every stored entry that matches the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA apparently means the request contained no HCI
		 * commands; complete immediately with the current class.
		 */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Response is deferred until remove_uuid_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2556 
/* HCI request callback for set_dev_class(): complete the pending command */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2563 
/* MGMT_OP_SET_DEV_CLASS handler: set the major/minor device class and
 * push the resulting Class of Device (and possibly EIR) to the
 * controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two bits of minor and high three bits of major are
	 * rejected as invalid.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off, only the stored values are updated and the
	 * command completes immediately.
	 */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* The lock is dropped while cancelling synchronously -
		 * NOTE(review): presumably because the work item takes
		 * hci_dev_lock() itself; confirm before restructuring.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA apparently means the request contained no HCI
		 * commands; complete immediately with the current class.
		 */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Response is deferred until set_class_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2634 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace all stored BR/EDR link keys
 * with the list supplied by userspace and update the keep-debug-keys
 * policy flag.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps the expected_len computation below from
	 * overflowing its u16 type.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before mutating any state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2716 
2717 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2718 			   u8 addr_type, struct sock *skip_sk)
2719 {
2720 	struct mgmt_ev_device_unpaired ev;
2721 
2722 	bacpy(&ev.addr.bdaddr, bdaddr);
2723 	ev.addr.type = addr_type;
2724 
2725 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2726 			  skip_sk);
2727 }
2728 
/* MGMT_OP_UNPAIR_DEVICE handler: delete the stored keys of a device
 * (link key for BR/EDR; IRK and LTKs for LE) and, when requested,
 * terminate an existing connection to it. With a pending disconnect
 * the response is deferred via addr_cmd_complete.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	/* The response always echoes back the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	/* disconnect is a strict boolean in the mgmt protocol */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* Negative result from the key removal means nothing was paired */
	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2841 
/* MGMT_OP_DISCONNECT handler: terminate the ACL or LE connection to
 * the given address. On success the response is deferred via the
 * pending command (generic_cmd_complete).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The response always echoes back the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* Connections that never got established or are already closed
	 * cannot be disconnected.
	 */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2904 
2905 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2906 {
2907 	switch (link_type) {
2908 	case LE_LINK:
2909 		switch (addr_type) {
2910 		case ADDR_LE_DEV_PUBLIC:
2911 			return BDADDR_LE_PUBLIC;
2912 
2913 		default:
2914 			/* Fallback to LE Random address type */
2915 			return BDADDR_LE_RANDOM;
2916 		}
2917 
2918 	default:
2919 		/* Fallback to BR/EDR type */
2920 		return BDADDR_BREDR;
2921 	}
2922 }
2923 
2924 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2925 			   u16 data_len)
2926 {
2927 	struct mgmt_rp_get_connections *rp;
2928 	struct hci_conn *c;
2929 	size_t rp_len;
2930 	int err;
2931 	u16 i;
2932 
2933 	BT_DBG("");
2934 
2935 	hci_dev_lock(hdev);
2936 
2937 	if (!hdev_is_powered(hdev)) {
2938 		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2939 				 MGMT_STATUS_NOT_POWERED);
2940 		goto unlock;
2941 	}
2942 
2943 	i = 0;
2944 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2945 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2946 			i++;
2947 	}
2948 
2949 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2950 	rp = kmalloc(rp_len, GFP_KERNEL);
2951 	if (!rp) {
2952 		err = -ENOMEM;
2953 		goto unlock;
2954 	}
2955 
2956 	i = 0;
2957 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2958 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2959 			continue;
2960 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2961 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2962 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2963 			continue;
2964 		i++;
2965 	}
2966 
2967 	rp->conn_count = cpu_to_le16(i);
2968 
2969 	/* Recalculate length in case of filtered SCO connections, etc */
2970 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2971 
2972 	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2973 			   rp_len);
2974 
2975 	kfree(rp);
2976 
2977 unlock:
2978 	hci_dev_unlock(hdev);
2979 	return err;
2980 }
2981 
/* Send an HCI PIN Code Negative Reply for the given address and track
 * it as a pending MGMT_OP_PIN_CODE_NEG_REPLY command. Returns a
 * negative errno on failure.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
3000 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for an ongoing BR/EDR pairing. The response is
 * deferred via addr_cmd_complete.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; anything shorter is
	 * auto-rejected with a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3062 
3063 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3064 			     u16 len)
3065 {
3066 	struct mgmt_cp_set_io_capability *cp = data;
3067 
3068 	BT_DBG("");
3069 
3070 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3071 		return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3072 				    MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3073 
3074 	hci_dev_lock(hdev);
3075 
3076 	hdev->io_capability = cp->io_capability;
3077 
3078 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3079 	       hdev->io_capability);
3080 
3081 	hci_dev_unlock(hdev);
3082 
3083 	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
3084 			    0);
3085 }
3086 
3087 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3088 {
3089 	struct hci_dev *hdev = conn->hdev;
3090 	struct pending_cmd *cmd;
3091 
3092 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3093 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3094 			continue;
3095 
3096 		if (cmd->user_data != conn)
3097 			continue;
3098 
3099 		return cmd;
3100 	}
3101 
3102 	return NULL;
3103 }
3104 
/* Finish a pending MGMT_OP_PAIR_DEVICE command: send the response,
 * detach all pairing callbacks from the connection and release the
 * references held for the pairing (hci_conn_put() balances the
 * hci_conn_get() taken in pair_device()).
 */
static int pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
			   &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
3133 
3134 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3135 {
3136 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3137 	struct pending_cmd *cmd;
3138 
3139 	cmd = find_pairing(conn);
3140 	if (cmd) {
3141 		cmd->cmd_complete(cmd, status);
3142 		mgmt_pending_remove(cmd);
3143 	}
3144 }
3145 
3146 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3147 {
3148 	struct pending_cmd *cmd;
3149 
3150 	BT_DBG("status %u", status);
3151 
3152 	cmd = find_pairing(conn);
3153 	if (!cmd) {
3154 		BT_DBG("Unable to find a pending command");
3155 		return;
3156 	}
3157 
3158 	cmd->cmd_complete(cmd, mgmt_status(status));
3159 	mgmt_pending_remove(cmd);
3160 }
3161 
3162 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3163 {
3164 	struct pending_cmd *cmd;
3165 
3166 	BT_DBG("status %u", status);
3167 
3168 	if (!status)
3169 		return;
3170 
3171 	cmd = find_pairing(conn);
3172 	if (!cmd) {
3173 		BT_DBG("Unable to find a pending command");
3174 		return;
3175 	}
3176 
3177 	cmd->cmd_complete(cmd, mgmt_status(status));
3178 	mgmt_pending_remove(cmd);
3179 }
3180 
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding with a remote
 * BR/EDR or LE device. The response is normally deferred until the
 * pairing completes or fails (via the connection callbacks).
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The response always echoes back the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* Callbacks already attached presumably mean another operation
	 * owns this connection's events - refuse with busy.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released by pairing_complete() via hci_conn_put() */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure, complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3303 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the pending PAIR_DEVICE
 * command, provided the supplied address matches the connection that
 * pairing was started for.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* No pairing in progress means there is nothing to cancel */
	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the connection the pairing is for */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3346 
/* Common helper for the user confirmation / passkey (negative) reply
 * commands: validate device and connection state, then either forward
 * the reply to SMP (LE links) or send the corresponding HCI command
 * (BR/EDR links, response deferred via addr_cmd_complete).
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE pairing replies go through SMP rather than raw HCI */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3416 
3417 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3418 			      void *data, u16 len)
3419 {
3420 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3421 
3422 	BT_DBG("");
3423 
3424 	return user_pairing_resp(sk, hdev, &cp->addr,
3425 				MGMT_OP_PIN_CODE_NEG_REPLY,
3426 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3427 }
3428 
3429 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3430 			      u16 len)
3431 {
3432 	struct mgmt_cp_user_confirm_reply *cp = data;
3433 
3434 	BT_DBG("");
3435 
3436 	if (len != sizeof(*cp))
3437 		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3438 				  MGMT_STATUS_INVALID_PARAMS);
3439 
3440 	return user_pairing_resp(sk, hdev, &cp->addr,
3441 				 MGMT_OP_USER_CONFIRM_REPLY,
3442 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3443 }
3444 
3445 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3446 				  void *data, u16 len)
3447 {
3448 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3449 
3450 	BT_DBG("");
3451 
3452 	return user_pairing_resp(sk, hdev, &cp->addr,
3453 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3454 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3455 }
3456 
3457 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3458 			      u16 len)
3459 {
3460 	struct mgmt_cp_user_passkey_reply *cp = data;
3461 
3462 	BT_DBG("");
3463 
3464 	return user_pairing_resp(sk, hdev, &cp->addr,
3465 				 MGMT_OP_USER_PASSKEY_REPLY,
3466 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3467 }
3468 
3469 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3470 				  void *data, u16 len)
3471 {
3472 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3473 
3474 	BT_DBG("");
3475 
3476 	return user_pairing_resp(sk, hdev, &cp->addr,
3477 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3478 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3479 }
3480 
/* Queue an HCI Write Local Name command carrying the current device
 * name. The full cp.name buffer is copied from hdev->dev_name
 * (assumes dev_name is at least sizeof(cp.name) bytes — both are
 * fixed-size fields).
 */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3490 
3491 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3492 {
3493 	struct mgmt_cp_set_local_name *cp;
3494 	struct pending_cmd *cmd;
3495 
3496 	BT_DBG("status 0x%02x", status);
3497 
3498 	hci_dev_lock(hdev);
3499 
3500 	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3501 	if (!cmd)
3502 		goto unlock;
3503 
3504 	cp = cmd->param;
3505 
3506 	if (status)
3507 		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3508 			   mgmt_status(status));
3509 	else
3510 		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3511 			     cp, sizeof(*cp));
3512 
3513 	mgmt_pending_remove(cmd);
3514 
3515 unlock:
3516 	hci_dev_unlock(hdev);
3517 }
3518 
/* Handler for the Set Local Name mgmt command. Updates the stored
 * short name immediately; the full name is either stored directly
 * (powered off) or pushed to the controller via an HCI request whose
 * completion callback sends the mgmt reply.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* When powered off no HCI traffic is possible: store the name and
	 * notify other mgmt sockets directly.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3587 
3588 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3589 			       void *data, u16 data_len)
3590 {
3591 	struct pending_cmd *cmd;
3592 	int err;
3593 
3594 	BT_DBG("%s", hdev->name);
3595 
3596 	hci_dev_lock(hdev);
3597 
3598 	if (!hdev_is_powered(hdev)) {
3599 		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3600 				 MGMT_STATUS_NOT_POWERED);
3601 		goto unlock;
3602 	}
3603 
3604 	if (!lmp_ssp_capable(hdev)) {
3605 		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3606 				 MGMT_STATUS_NOT_SUPPORTED);
3607 		goto unlock;
3608 	}
3609 
3610 	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3611 		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3612 				 MGMT_STATUS_BUSY);
3613 		goto unlock;
3614 	}
3615 
3616 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3617 	if (!cmd) {
3618 		err = -ENOMEM;
3619 		goto unlock;
3620 	}
3621 
3622 	if (bredr_sc_enabled(hdev))
3623 		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3624 				   0, NULL);
3625 	else
3626 		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3627 
3628 	if (err < 0)
3629 		mgmt_pending_remove(cmd);
3630 
3631 unlock:
3632 	hci_dev_unlock(hdev);
3633 	return err;
3634 }
3635 
/* Handler for the Add Remote OOB Data mgmt command. Two payload sizes
 * are accepted: the legacy form with one hash/randomizer pair (P-192,
 * BR/EDR only) and the extended form carrying both P-192 and P-256
 * pairs. Zero-valued pairs in the extended form disable OOB data for
 * that curve.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				    MGMT_STATUS_INVALID_PARAMS, addr,
				    sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy (P-192 only) form is defined for BR/EDR only */
		if (cp->addr.type != BDADDR_BREDR) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_ADD_REMOTE_OOB_DATA,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_ADD_REMOTE_OOB_DATA,
						   MGMT_STATUS_INVALID_PARAMS,
						   addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3739 
3740 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3741 				  void *data, u16 len)
3742 {
3743 	struct mgmt_cp_remove_remote_oob_data *cp = data;
3744 	u8 status;
3745 	int err;
3746 
3747 	BT_DBG("%s", hdev->name);
3748 
3749 	if (cp->addr.type != BDADDR_BREDR)
3750 		return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3751 				    MGMT_STATUS_INVALID_PARAMS,
3752 				    &cp->addr, sizeof(cp->addr));
3753 
3754 	hci_dev_lock(hdev);
3755 
3756 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3757 		hci_remote_oob_data_clear(hdev);
3758 		status = MGMT_STATUS_SUCCESS;
3759 		goto done;
3760 	}
3761 
3762 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3763 	if (err < 0)
3764 		status = MGMT_STATUS_INVALID_PARAMS;
3765 	else
3766 		status = MGMT_STATUS_SUCCESS;
3767 
3768 done:
3769 	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3770 			   status, &cp->addr, sizeof(cp->addr));
3771 
3772 	hci_dev_unlock(hdev);
3773 	return err;
3774 }
3775 
/* Append the HCI commands needed to start discovery of the currently
 * configured type (hdev->discovery.type) to the given request.
 *
 * Returns true when the commands were queued. Returns false when
 * discovery cannot be started, in which case *status is set to the
 * mgmt status code to report back to userspace.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 own_addr_type;
	int err;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		*status = mgmt_bredr_support(hdev);
		if (*status)
			return false;

		/* An inquiry already in progress means we are busy */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			*status = MGMT_STATUS_BUSY;
			return false;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		*status = mgmt_le_support(hdev);
		if (*status)
			return false;

		/* Interleaved discovery additionally needs BR/EDR */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				*status = MGMT_STATUS_REJECTED;
				return false;
			}

			disable_advertising(req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or non-resolvable private address.
		 */
		err = hci_update_random_address(req, true, &own_addr_type);
		if (err < 0) {
			*status = MGMT_STATUS_FAILED;
			return false;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
3872 
/* Completion callback shared by Start Discovery and Start Service
 * Discovery HCI requests. Answers the pending mgmt command, updates
 * the discovery state, and schedules the LE scan disable work for
 * discovery types that involve LE scanning.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* Either of the two start commands may have triggered this request */
	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		/* BR/EDR inquiry terminates on its own; nothing to schedule */
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout) {
		/* When service discovery is used and the controller has
		 * a strict duplicate filter, it is important to remember
		 * the start and duration of the scan. This is required
		 * for restarting scanning during the discovery phase.
		 */
		if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
			     &hdev->quirks) &&
		    (hdev->discovery.uuid_count > 0 ||
		     hdev->discovery.rssi != HCI_RSSI_INVALID)) {
			hdev->discovery.scan_start = jiffies;
			hdev->discovery.scan_duration = timeout;
		}

		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);
	}

unlock:
	hci_dev_unlock(hdev);
}
3939 
/* Handler for the Start Discovery mgmt command. Validates that the
 * controller is powered and idle, clears any previous discovery
 * filter, and builds/runs the HCI request that actually starts
 * inquiry and/or LE scanning. The mgmt reply for the success path is
 * sent from start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and periodic inquiry
	 * conflicts with it.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4005 
4006 static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
4007 {
4008 	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
4009 			    cmd->param, 1);
4010 }
4011 
/* Handler for the Start Service Discovery mgmt command. Like Start
 * Discovery, but additionally installs an RSSI threshold and a list of
 * service UUIDs used to filter found devices. The variable-length
 * UUID list is validated against the declared count before being
 * copied into hdev->discovery.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	/* The payload must contain exactly uuid_count 16-byte UUIDs */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_SERVICE_DISCOVERY,
					   MGMT_STATUS_FAILED,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4119 
4120 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4121 {
4122 	struct pending_cmd *cmd;
4123 
4124 	BT_DBG("status %d", status);
4125 
4126 	hci_dev_lock(hdev);
4127 
4128 	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4129 	if (cmd) {
4130 		cmd->cmd_complete(cmd, mgmt_status(status));
4131 		mgmt_pending_remove(cmd);
4132 	}
4133 
4134 	if (!status)
4135 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4136 
4137 	hci_dev_unlock(hdev);
4138 }
4139 
/* Handler for the Stop Discovery mgmt command. The requested type
 * must match the discovery type currently running. When the HCI
 * request queues no commands (-ENODATA), discovery is stopped and the
 * reply is sent directly instead of from the completion callback.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4197 
4198 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4199 			u16 len)
4200 {
4201 	struct mgmt_cp_confirm_name *cp = data;
4202 	struct inquiry_entry *e;
4203 	int err;
4204 
4205 	BT_DBG("%s", hdev->name);
4206 
4207 	hci_dev_lock(hdev);
4208 
4209 	if (!hci_discovery_active(hdev)) {
4210 		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4211 				   MGMT_STATUS_FAILED, &cp->addr,
4212 				   sizeof(cp->addr));
4213 		goto failed;
4214 	}
4215 
4216 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4217 	if (!e) {
4218 		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4219 				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4220 				   sizeof(cp->addr));
4221 		goto failed;
4222 	}
4223 
4224 	if (cp->name_known) {
4225 		e->name_state = NAME_KNOWN;
4226 		list_del(&e->list);
4227 	} else {
4228 		e->name_state = NAME_NEEDED;
4229 		hci_inquiry_cache_update_resolve(hdev, e);
4230 	}
4231 
4232 	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4233 			   sizeof(cp->addr));
4234 
4235 failed:
4236 	hci_dev_unlock(hdev);
4237 	return err;
4238 }
4239 
4240 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4241 			u16 len)
4242 {
4243 	struct mgmt_cp_block_device *cp = data;
4244 	u8 status;
4245 	int err;
4246 
4247 	BT_DBG("%s", hdev->name);
4248 
4249 	if (!bdaddr_type_is_valid(cp->addr.type))
4250 		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4251 				    MGMT_STATUS_INVALID_PARAMS,
4252 				    &cp->addr, sizeof(cp->addr));
4253 
4254 	hci_dev_lock(hdev);
4255 
4256 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4257 				  cp->addr.type);
4258 	if (err < 0) {
4259 		status = MGMT_STATUS_FAILED;
4260 		goto done;
4261 	}
4262 
4263 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4264 		   sk);
4265 	status = MGMT_STATUS_SUCCESS;
4266 
4267 done:
4268 	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4269 			   &cp->addr, sizeof(cp->addr));
4270 
4271 	hci_dev_unlock(hdev);
4272 
4273 	return err;
4274 }
4275 
4276 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4277 			  u16 len)
4278 {
4279 	struct mgmt_cp_unblock_device *cp = data;
4280 	u8 status;
4281 	int err;
4282 
4283 	BT_DBG("%s", hdev->name);
4284 
4285 	if (!bdaddr_type_is_valid(cp->addr.type))
4286 		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4287 				    MGMT_STATUS_INVALID_PARAMS,
4288 				    &cp->addr, sizeof(cp->addr));
4289 
4290 	hci_dev_lock(hdev);
4291 
4292 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4293 				  cp->addr.type);
4294 	if (err < 0) {
4295 		status = MGMT_STATUS_INVALID_PARAMS;
4296 		goto done;
4297 	}
4298 
4299 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4300 		   sk);
4301 	status = MGMT_STATUS_SUCCESS;
4302 
4303 done:
4304 	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4305 			   &cp->addr, sizeof(cp->addr));
4306 
4307 	hci_dev_unlock(hdev);
4308 
4309 	return err;
4310 }
4311 
4312 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4313 			 u16 len)
4314 {
4315 	struct mgmt_cp_set_device_id *cp = data;
4316 	struct hci_request req;
4317 	int err;
4318 	__u16 source;
4319 
4320 	BT_DBG("%s", hdev->name);
4321 
4322 	source = __le16_to_cpu(cp->source);
4323 
4324 	if (source > 0x0002)
4325 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4326 				  MGMT_STATUS_INVALID_PARAMS);
4327 
4328 	hci_dev_lock(hdev);
4329 
4330 	hdev->devid_source = source;
4331 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4332 	hdev->devid_product = __le16_to_cpu(cp->product);
4333 	hdev->devid_version = __le16_to_cpu(cp->version);
4334 
4335 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4336 
4337 	hci_req_init(&req, hdev);
4338 	update_eir(&req);
4339 	hci_req_run(&req, NULL);
4340 
4341 	hci_dev_unlock(hdev);
4342 
4343 	return err;
4344 }
4345 
/* Completion callback for the Set Advertising HCI request. Syncs the
 * HCI_ADVERTISING flag with the controller's actual LE advertising
 * state and answers all pending Set Advertising commands.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller state into the mgmt-visible flag */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last responded socket */
	if (match.sk)
		sock_put(match.sk);

unlock:
	hci_dev_unlock(hdev);
}
4377 
/* Handler for the Set Advertising mgmt command. Depending on the
 * current state this either just toggles the HCI_ADVERTISING flag (no
 * HCI traffic possible or needed) or queues an HCI request to
 * enable/disable advertising, replying from its completion callback.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* A pending Set Advertising or Set LE command is already
	 * changing the relevant state; refuse to race with it.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4457 
4458 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4459 			      void *data, u16 len)
4460 {
4461 	struct mgmt_cp_set_static_address *cp = data;
4462 	int err;
4463 
4464 	BT_DBG("%s", hdev->name);
4465 
4466 	if (!lmp_le_capable(hdev))
4467 		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4468 				  MGMT_STATUS_NOT_SUPPORTED);
4469 
4470 	if (hdev_is_powered(hdev))
4471 		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4472 				  MGMT_STATUS_REJECTED);
4473 
4474 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4475 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4476 			return cmd_status(sk, hdev->id,
4477 					  MGMT_OP_SET_STATIC_ADDRESS,
4478 					  MGMT_STATUS_INVALID_PARAMS);
4479 
4480 		/* Two most significant bits shall be set */
4481 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4482 			return cmd_status(sk, hdev->id,
4483 					  MGMT_OP_SET_STATIC_ADDRESS,
4484 					  MGMT_STATUS_INVALID_PARAMS);
4485 	}
4486 
4487 	hci_dev_lock(hdev);
4488 
4489 	bacpy(&hdev->static_addr, &cp->bdaddr);
4490 
4491 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4492 
4493 	hci_dev_unlock(hdev);
4494 
4495 	return err;
4496 }
4497 
4498 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4499 			   void *data, u16 len)
4500 {
4501 	struct mgmt_cp_set_scan_params *cp = data;
4502 	__u16 interval, window;
4503 	int err;
4504 
4505 	BT_DBG("%s", hdev->name);
4506 
4507 	if (!lmp_le_capable(hdev))
4508 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4509 				  MGMT_STATUS_NOT_SUPPORTED);
4510 
4511 	interval = __le16_to_cpu(cp->interval);
4512 
4513 	if (interval < 0x0004 || interval > 0x4000)
4514 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4515 				  MGMT_STATUS_INVALID_PARAMS);
4516 
4517 	window = __le16_to_cpu(cp->window);
4518 
4519 	if (window < 0x0004 || window > 0x4000)
4520 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4521 				  MGMT_STATUS_INVALID_PARAMS);
4522 
4523 	if (window > interval)
4524 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4525 				  MGMT_STATUS_INVALID_PARAMS);
4526 
4527 	hci_dev_lock(hdev);
4528 
4529 	hdev->le_scan_interval = interval;
4530 	hdev->le_scan_window = window;
4531 
4532 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4533 
4534 	/* If background scan is running, restart it so new parameters are
4535 	 * loaded.
4536 	 */
4537 	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4538 	    hdev->discovery.state == DISCOVERY_STOPPED) {
4539 		struct hci_request req;
4540 
4541 		hci_req_init(&req, hdev);
4542 
4543 		hci_req_add_le_scan_disable(&req);
4544 		hci_req_add_le_passive_scan(&req);
4545 
4546 		hci_req_run(&req, NULL);
4547 	}
4548 
4549 	hci_dev_unlock(hdev);
4550 
4551 	return err;
4552 }
4553 
4554 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4555 				      u16 opcode)
4556 {
4557 	struct pending_cmd *cmd;
4558 
4559 	BT_DBG("status 0x%02x", status);
4560 
4561 	hci_dev_lock(hdev);
4562 
4563 	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4564 	if (!cmd)
4565 		goto unlock;
4566 
4567 	if (status) {
4568 		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4569 			   mgmt_status(status));
4570 	} else {
4571 		struct mgmt_mode *cp = cmd->param;
4572 
4573 		if (cp->val)
4574 			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4575 		else
4576 			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4577 
4578 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4579 		new_settings(hdev, cmd->sk);
4580 	}
4581 
4582 	mgmt_pending_remove(cmd);
4583 
4584 unlock:
4585 	hci_dev_unlock(hdev);
4586 }
4587 
/* Handler for the Set Fast Connectable mgmt command.
 *
 * Queues an HCI request that updates the page scan settings via
 * write_fast_connectable(); the result is reported asynchronously from
 * fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* This is a BR/EDR-only setting and needs at least a 1.2
	 * controller.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	/* Fast connectable only applies while the device is connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to do if the setting already matches; just ack */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4652 
4653 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4654 {
4655 	struct pending_cmd *cmd;
4656 
4657 	BT_DBG("status 0x%02x", status);
4658 
4659 	hci_dev_lock(hdev);
4660 
4661 	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4662 	if (!cmd)
4663 		goto unlock;
4664 
4665 	if (status) {
4666 		u8 mgmt_err = mgmt_status(status);
4667 
4668 		/* We need to restore the flag if related HCI commands
4669 		 * failed.
4670 		 */
4671 		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4672 
4673 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4674 	} else {
4675 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4676 		new_settings(hdev, cmd->sk);
4677 	}
4678 
4679 	mgmt_pending_remove(cmd);
4680 
4681 unlock:
4682 	hci_dev_unlock(hdev);
4683 }
4684 
/* Handler for the Set BR/EDR mgmt command (dual-mode controllers only).
 *
 * Enabling BR/EDR while powered requires queueing HCI commands; the
 * result is then delivered from set_bredr_complete(). Disabling while
 * powered is rejected.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only meaningful on controllers supporting both transports */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Toggling BR/EDR requires LE to stay enabled as the other
	 * transport.
	 */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Setting already matches; just acknowledge */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* While powered off only the stored flags change; disabling
	 * BR/EDR also clears all BR/EDR-only settings.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4796 
4797 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4798 {
4799 	struct pending_cmd *cmd;
4800 	struct mgmt_mode *cp;
4801 
4802 	BT_DBG("%s status %u", hdev->name, status);
4803 
4804 	hci_dev_lock(hdev);
4805 
4806 	cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4807 	if (!cmd)
4808 		goto unlock;
4809 
4810 	if (status) {
4811 		cmd_status(cmd->sk, cmd->index, cmd->opcode,
4812 			   mgmt_status(status));
4813 		goto remove;
4814 	}
4815 
4816 	cp = cmd->param;
4817 
4818 	switch (cp->val) {
4819 	case 0x00:
4820 		clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4821 		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4822 		break;
4823 	case 0x01:
4824 		set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4825 		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4826 		break;
4827 	case 0x02:
4828 		set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4829 		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4830 		break;
4831 	}
4832 
4833 	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4834 	new_settings(hdev, cmd->sk);
4835 
4836 remove:
4837 	mgmt_pending_remove(cmd);
4838 unlock:
4839 	hci_dev_unlock(hdev);
4840 }
4841 
/* Handler for the Set Secure Connections mgmt command.
 *
 * Value 0x00 disables SC, 0x01 enables SC and 0x02 enables SC-only
 * mode. When the controller needs to be involved, the result is
 * reported asynchronously from sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* SC is usable if either the controller supports it or LE is
	 * enabled (for LE, SC is a host stack feature).
	 */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* On a BR/EDR capable controller, SC requires SSP to be enabled */
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
	    lmp_sc_capable(hdev) &&
	    !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* When powered off, not SC capable, or BR/EDR disabled, only the
	 * stored flags need updating - no HCI command is required.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do if both flags already match the request */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
4929 
4930 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4931 			  void *data, u16 len)
4932 {
4933 	struct mgmt_mode *cp = data;
4934 	bool changed, use_changed;
4935 	int err;
4936 
4937 	BT_DBG("request for %s", hdev->name);
4938 
4939 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4940 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4941 				  MGMT_STATUS_INVALID_PARAMS);
4942 
4943 	hci_dev_lock(hdev);
4944 
4945 	if (cp->val)
4946 		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4947 					    &hdev->dev_flags);
4948 	else
4949 		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4950 					     &hdev->dev_flags);
4951 
4952 	if (cp->val == 0x02)
4953 		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4954 						&hdev->dev_flags);
4955 	else
4956 		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4957 						 &hdev->dev_flags);
4958 
4959 	if (hdev_is_powered(hdev) && use_changed &&
4960 	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4961 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4962 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4963 			     sizeof(mode), &mode);
4964 	}
4965 
4966 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4967 	if (err < 0)
4968 		goto unlock;
4969 
4970 	if (changed)
4971 		err = new_settings(hdev, sk);
4972 
4973 unlock:
4974 	hci_dev_unlock(hdev);
4975 	return err;
4976 }
4977 
4978 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4979 		       u16 len)
4980 {
4981 	struct mgmt_cp_set_privacy *cp = cp_data;
4982 	bool changed;
4983 	int err;
4984 
4985 	BT_DBG("request for %s", hdev->name);
4986 
4987 	if (!lmp_le_capable(hdev))
4988 		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4989 				  MGMT_STATUS_NOT_SUPPORTED);
4990 
4991 	if (cp->privacy != 0x00 && cp->privacy != 0x01)
4992 		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4993 				  MGMT_STATUS_INVALID_PARAMS);
4994 
4995 	if (hdev_is_powered(hdev))
4996 		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4997 				  MGMT_STATUS_REJECTED);
4998 
4999 	hci_dev_lock(hdev);
5000 
5001 	/* If user space supports this command it is also expected to
5002 	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5003 	 */
5004 	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
5005 
5006 	if (cp->privacy) {
5007 		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
5008 		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5009 		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
5010 	} else {
5011 		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
5012 		memset(hdev->irk, 0, sizeof(hdev->irk));
5013 		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
5014 	}
5015 
5016 	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5017 	if (err < 0)
5018 		goto unlock;
5019 
5020 	if (changed)
5021 		err = new_settings(hdev, sk);
5022 
5023 unlock:
5024 	hci_dev_unlock(hdev);
5025 	return err;
5026 }
5027 
5028 static bool irk_is_valid(struct mgmt_irk_info *irk)
5029 {
5030 	switch (irk->addr.type) {
5031 	case BDADDR_LE_PUBLIC:
5032 		return true;
5033 
5034 	case BDADDR_LE_RANDOM:
5035 		/* Two most significant bits shall be set */
5036 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5037 			return false;
5038 		return true;
5039 	}
5040 
5041 	return false;
5042 }
5043 
5044 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5045 		     u16 len)
5046 {
5047 	struct mgmt_cp_load_irks *cp = cp_data;
5048 	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5049 				   sizeof(struct mgmt_irk_info));
5050 	u16 irk_count, expected_len;
5051 	int i, err;
5052 
5053 	BT_DBG("request for %s", hdev->name);
5054 
5055 	if (!lmp_le_capable(hdev))
5056 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5057 				  MGMT_STATUS_NOT_SUPPORTED);
5058 
5059 	irk_count = __le16_to_cpu(cp->irk_count);
5060 	if (irk_count > max_irk_count) {
5061 		BT_ERR("load_irks: too big irk_count value %u", irk_count);
5062 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5063 				  MGMT_STATUS_INVALID_PARAMS);
5064 	}
5065 
5066 	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5067 	if (expected_len != len) {
5068 		BT_ERR("load_irks: expected %u bytes, got %u bytes",
5069 		       expected_len, len);
5070 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5071 				  MGMT_STATUS_INVALID_PARAMS);
5072 	}
5073 
5074 	BT_DBG("%s irk_count %u", hdev->name, irk_count);
5075 
5076 	for (i = 0; i < irk_count; i++) {
5077 		struct mgmt_irk_info *key = &cp->irks[i];
5078 
5079 		if (!irk_is_valid(key))
5080 			return cmd_status(sk, hdev->id,
5081 					  MGMT_OP_LOAD_IRKS,
5082 					  MGMT_STATUS_INVALID_PARAMS);
5083 	}
5084 
5085 	hci_dev_lock(hdev);
5086 
5087 	hci_smp_irks_clear(hdev);
5088 
5089 	for (i = 0; i < irk_count; i++) {
5090 		struct mgmt_irk_info *irk = &cp->irks[i];
5091 		u8 addr_type;
5092 
5093 		if (irk->addr.type == BDADDR_LE_PUBLIC)
5094 			addr_type = ADDR_LE_DEV_PUBLIC;
5095 		else
5096 			addr_type = ADDR_LE_DEV_RANDOM;
5097 
5098 		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5099 			    BDADDR_ANY);
5100 	}
5101 
5102 	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
5103 
5104 	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5105 
5106 	hci_dev_unlock(hdev);
5107 
5108 	return err;
5109 }
5110 
5111 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5112 {
5113 	if (key->master != 0x00 && key->master != 0x01)
5114 		return false;
5115 
5116 	switch (key->addr.type) {
5117 	case BDADDR_LE_PUBLIC:
5118 		return true;
5119 
5120 	case BDADDR_LE_RANDOM:
5121 		/* Two most significant bits shall be set */
5122 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5123 			return false;
5124 		return true;
5125 	}
5126 
5127 	return false;
5128 }
5129 
/* Handler for the Load Long Term Keys mgmt command.
 *
 * Validates the full key list first, then atomically (under hdev->lock)
 * replaces the stored LTKs with the provided ones.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound on key_count such that the total parameter size
	 * still fits into the u16 length field.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the declared key count exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate all keys before clearing the existing list */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* NOTE(review): no break here, so this case falls
			 * through to default and the key is skipped -
			 * presumably intentional (debug keys are not
			 * loaded), but confirm and annotate if so.
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5221 
5222 static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5223 {
5224 	struct hci_conn *conn = cmd->user_data;
5225 	struct mgmt_rp_get_conn_info rp;
5226 	int err;
5227 
5228 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5229 
5230 	if (status == MGMT_STATUS_SUCCESS) {
5231 		rp.rssi = conn->rssi;
5232 		rp.tx_power = conn->tx_power;
5233 		rp.max_tx_power = conn->max_tx_power;
5234 	} else {
5235 		rp.rssi = HCI_RSSI_INVALID;
5236 		rp.tx_power = HCI_TX_POWER_INVALID;
5237 		rp.max_tx_power = HCI_TX_POWER_INVALID;
5238 	}
5239 
5240 	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
5241 			   &rp, sizeof(rp));
5242 
5243 	hci_conn_drop(conn);
5244 	hci_conn_put(conn);
5245 
5246 	return err;
5247 }
5248 
/* Completion callback for the RSSI/TX-power refresh request queued by
 * get_conn_info(). Resolves the connection the request was for and
 * finishes the matching pending Get Connection Information command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command was the last one sent - nothing to finish */
	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	/* Match the pending command by the connection it targeted */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	/* conn_info_cmd_complete() - sends the reply and drops the
	 * connection references.
	 */
	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5301 
/* Handler for the Get Connection Information mgmt command.
 *
 * Replies immediately from cached RSSI/TX power values when they are
 * fresh enough; otherwise queues Read RSSI / Read TX Power commands and
 * completes asynchronously via conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Every reply (including errors) echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* Only one outstanding request per connection */
	if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* NOTE(review): the pending command is added after
		 * hci_req_run(); presumably the completion path cannot
		 * race with this because conn_info_refresh_complete()
		 * also takes hdev->lock - confirm.
		 */
		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References are dropped in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5420 
5421 static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5422 {
5423 	struct hci_conn *conn = cmd->user_data;
5424 	struct mgmt_rp_get_clock_info rp;
5425 	struct hci_dev *hdev;
5426 	int err;
5427 
5428 	memset(&rp, 0, sizeof(rp));
5429 	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5430 
5431 	if (status)
5432 		goto complete;
5433 
5434 	hdev = hci_dev_get(cmd->index);
5435 	if (hdev) {
5436 		rp.local_clock = cpu_to_le32(hdev->clock);
5437 		hci_dev_put(hdev);
5438 	}
5439 
5440 	if (conn) {
5441 		rp.piconet_clock = cpu_to_le32(conn->clock);
5442 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5443 	}
5444 
5445 complete:
5446 	err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5447 			   sizeof(rp));
5448 
5449 	if (conn) {
5450 		hci_conn_drop(conn);
5451 		hci_conn_put(conn);
5452 	}
5453 
5454 	return err;
5455 }
5456 
5457 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5458 {
5459 	struct hci_cp_read_clock *hci_cp;
5460 	struct pending_cmd *cmd;
5461 	struct hci_conn *conn;
5462 
5463 	BT_DBG("%s status %u", hdev->name, status);
5464 
5465 	hci_dev_lock(hdev);
5466 
5467 	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5468 	if (!hci_cp)
5469 		goto unlock;
5470 
5471 	if (hci_cp->which) {
5472 		u16 handle = __le16_to_cpu(hci_cp->handle);
5473 		conn = hci_conn_hash_lookup_handle(hdev, handle);
5474 	} else {
5475 		conn = NULL;
5476 	}
5477 
5478 	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5479 	if (!cmd)
5480 		goto unlock;
5481 
5482 	cmd->cmd_complete(cmd, mgmt_status(status));
5483 	mgmt_pending_remove(cmd);
5484 
5485 unlock:
5486 	hci_dev_unlock(hdev);
5487 }
5488 
/* Handler for the Get Clock Information mgmt command.
 *
 * Queues Read Clock for the local clock and, if a peer address was
 * given, a second Read Clock for that connection's piconet clock. The
 * reply is sent from get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Every reply (including errors) echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Piconet clocks only exist for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means only the local clock is requested; otherwise
	 * an established connection to the given peer must exist.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* which = 0x00 reads the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References are dropped in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5563 
5564 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5565 {
5566 	struct hci_conn *conn;
5567 
5568 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5569 	if (!conn)
5570 		return false;
5571 
5572 	if (conn->dst_type != type)
5573 		return false;
5574 
5575 	if (conn->state != BT_CONNECTED)
5576 		return false;
5577 
5578 	return true;
5579 }
5580 
/* Set the auto-connect policy for one LE device, creating the
 * hci_conn_params entry on demand via hci_conn_params_add(). Queues any
 * needed background-scan update on @req.
 *
 * This function requires the caller holds hdev->lock
 */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if this policy is already in effect */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list (pend_le_conns or
	 * pend_le_reports) the entry may currently be on; it is re-added
	 * below according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection when the device is not
		 * already connected.
		 */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5622 
/* Emit the MGMT Device Added event for @bdaddr/@type with the configured
 * @action. @sk (the command originator) is passed through to mgmt_event().
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;
	ev.action = action;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
5634 
/* HCI request completion callback for add_device(): answer the pending
 * MGMT_OP_ADD_DEVICE command with the translated HCI status and drop it.
 */
static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. completed on the
	 * error path of add_device() itself).
	 */
	cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5653 
5654 static int add_device(struct sock *sk, struct hci_dev *hdev,
5655 		      void *data, u16 len)
5656 {
5657 	struct mgmt_cp_add_device *cp = data;
5658 	struct pending_cmd *cmd;
5659 	struct hci_request req;
5660 	u8 auto_conn, addr_type;
5661 	int err;
5662 
5663 	BT_DBG("%s", hdev->name);
5664 
5665 	if (!bdaddr_type_is_valid(cp->addr.type) ||
5666 	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5667 		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5668 				    MGMT_STATUS_INVALID_PARAMS,
5669 				    &cp->addr, sizeof(cp->addr));
5670 
5671 	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5672 		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5673 				    MGMT_STATUS_INVALID_PARAMS,
5674 				    &cp->addr, sizeof(cp->addr));
5675 
5676 	hci_req_init(&req, hdev);
5677 
5678 	hci_dev_lock(hdev);
5679 
5680 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5681 	if (!cmd) {
5682 		err = -ENOMEM;
5683 		goto unlock;
5684 	}
5685 
5686 	cmd->cmd_complete = addr_cmd_complete;
5687 
5688 	if (cp->addr.type == BDADDR_BREDR) {
5689 		/* Only incoming connections action is supported for now */
5690 		if (cp->action != 0x01) {
5691 			err = cmd->cmd_complete(cmd,
5692 						MGMT_STATUS_INVALID_PARAMS);
5693 			mgmt_pending_remove(cmd);
5694 			goto unlock;
5695 		}
5696 
5697 		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5698 					  cp->addr.type);
5699 		if (err)
5700 			goto unlock;
5701 
5702 		__hci_update_page_scan(&req);
5703 
5704 		goto added;
5705 	}
5706 
5707 	if (cp->addr.type == BDADDR_LE_PUBLIC)
5708 		addr_type = ADDR_LE_DEV_PUBLIC;
5709 	else
5710 		addr_type = ADDR_LE_DEV_RANDOM;
5711 
5712 	if (cp->action == 0x02)
5713 		auto_conn = HCI_AUTO_CONN_ALWAYS;
5714 	else if (cp->action == 0x01)
5715 		auto_conn = HCI_AUTO_CONN_DIRECT;
5716 	else
5717 		auto_conn = HCI_AUTO_CONN_REPORT;
5718 
5719 	/* If the connection parameters don't exist for this device,
5720 	 * they will be created and configured with defaults.
5721 	 */
5722 	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
5723 				auto_conn) < 0) {
5724 		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
5725 		mgmt_pending_remove(cmd);
5726 		goto unlock;
5727 	}
5728 
5729 added:
5730 	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5731 
5732 	err = hci_req_run(&req, add_device_complete);
5733 	if (err < 0) {
5734 		/* ENODATA means no HCI commands were needed (e.g. if
5735 		 * the adapter is powered off).
5736 		 */
5737 		if (err == -ENODATA)
5738 			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5739 		mgmt_pending_remove(cmd);
5740 	}
5741 
5742 unlock:
5743 	hci_dev_unlock(hdev);
5744 	return err;
5745 }
5746 
/* Emit the MGMT Device Removed event for @bdaddr/@type. @sk (the command
 * originator) is passed through to mgmt_event().
 */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
5757 
/* HCI request completion callback for remove_device(): answer the pending
 * MGMT_OP_REMOVE_DEVICE command with the translated HCI status and drop it.
 */
static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone if remove_device()
	 * completed it on one of its own error paths.
	 */
	cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5776 
/* Handler for MGMT_OP_REMOVE_DEVICE: remove one device (BR/EDR whitelist
 * entry or LE connection parameters), or — when the address is BDADDR_ANY
 * with type 0 — remove all whitelist entries and all non-disabled LE
 * connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Non-zero address: remove a single device */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
				mgmt_pending_remove(cmd);
				goto unlock;
			}

			__hci_update_page_scan(&req);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		/* Disabled entries were not added via Add Device, so
		 * refuse to remove them through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		__hci_update_background_scan(&req);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove everything */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY is only valid with address type 0 */
		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		__hci_update_page_scan(&req);

		/* Disabled entries are left in place; they were created
		 * implicitly, not via Add Device.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);
	}

complete:
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5903 
/* Handler for MGMT_OP_LOAD_CONN_PARAM: bulk-load LE connection parameters
 * from userspace, replacing any previously-disabled entries. Invalid
 * individual entries are logged and skipped rather than failing the
 * whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound on param_count such that expected_len below cannot
	 * overflow the u16 arithmetic.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Only LE address types are meaningful here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
5988 
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: toggle the HCI_EXT_CONFIGURED
 * flag for controllers that declare HCI_QUIRK_EXTERNAL_CONFIG. A change
 * may move the controller between the configured and unconfigured index
 * lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* Only allowed while the controller is powered off */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				    MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state changed, re-register the index: either
	 * power it on for configuration (now unconfigured -> configured
	 * transition pending) or re-announce it as an unconfigured index.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6046 
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: store a public address for
 * controllers that provide a set_bdaddr driver callback. If this makes
 * an unconfigured controller fully configured, kick off the power-on
 * configuration sequence.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* Only allowed while the controller is powered off */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		err = new_options(hdev, sk);

	/* Address was the missing piece: move the controller from the
	 * unconfigured to the configured index list and power it on to
	 * complete configuration.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		set_bit(HCI_CONFIG, &hdev->dev_flags);
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6098 
/* Dispatch table of mgmt command handlers, indexed by opcode. Entries
 * with a NULL func are rejected by mgmt_control() as unknown commands.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;	/* true: data_len is a minimum; false: exact size */
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_bondable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
	{ start_service_discovery,true,  MGMT_START_SERVICE_DISCOVERY_SIZE },
};
6165 
/* Entry point for mgmt messages received on an HCI control socket.
 * Copies the message, validates the header, index, opcode and parameter
 * length, then dispatches to the handler table. Returns the number of
 * bytes consumed on success or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The declared parameter length must match the actual payload */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config, or claimed by a user
		 * channel, are not visible through the mgmt interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept the configuration
		 * related commands.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global commands must not carry a controller index ... */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ... and per-controller commands require one */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Variable-length commands require at least data_len bytes,
	 * fixed-length commands exactly data_len bytes.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
6275 
6276 void mgmt_index_added(struct hci_dev *hdev)
6277 {
6278 	if (hdev->dev_type != HCI_BREDR)
6279 		return;
6280 
6281 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6282 		return;
6283 
6284 	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6285 		mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6286 	else
6287 		mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
6288 }
6289 
6290 void mgmt_index_removed(struct hci_dev *hdev)
6291 {
6292 	u8 status = MGMT_STATUS_INVALID_INDEX;
6293 
6294 	if (hdev->dev_type != HCI_BREDR)
6295 		return;
6296 
6297 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6298 		return;
6299 
6300 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6301 
6302 	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6303 		mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6304 	else
6305 		mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6306 }
6307 
/* Re-queue every LE connection parameter entry onto the action list
 * matching its auto_connect policy, then refresh background scanning.
 *
 * This function requires the caller holds hdev->lock
 */
static void restart_le_actions(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			/* Disabled/link-loss entries stay off both lists */
			break;
		}
	}

	__hci_update_background_scan(req);
}
6335 
/* Completion callback for the power-on HCI request built by
 * powered_update_hci(): answer pending Set Powered commands and emit
 * New Settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	if (!status) {
		/* Register the available SMP channels (BR/EDR and LE) only
		 * when successfully powering on the controller. This late
		 * registration is required so that LE SMP can clearly
		 * decide if the public address or static address is used.
		 */
		smp_register(hdev);
	}

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp holds a reference on the last matching socket */
	if (match.sk)
		sock_put(match.sk);
}
6362 
/* Build and run the HCI request that brings the controller's state in
 * line with the configured mgmt settings after power-on (SSP, SC, LE
 * host support, advertising, auth, scan, class, name, EIR). Returns the
 * result of hci_req_run() (-ENODATA when no commands were queued).
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP (and SC if applicable) when wanted but not yet
	 * active on the controller.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);

		restart_le_actions(&req);
	}

	/* Sync the link-security (authentication) setting with HCI_AUTH */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
6431 
/* Notify mgmt that the controller's power state changed. On power-on,
 * powered_update_hci() drives the settings sync and its completion
 * callback responds to pending commands; on power-off (or when no HCI
 * commands were needed) pending commands are answered here and New
 * Settings is emitted directly.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* 0 means the request was queued; powered_complete()
		 * takes over from here.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Announce the class of device reset if it was non-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
6478 
6479 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6480 {
6481 	struct pending_cmd *cmd;
6482 	u8 status;
6483 
6484 	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6485 	if (!cmd)
6486 		return;
6487 
6488 	if (err == -ERFKILL)
6489 		status = MGMT_STATUS_RFKILLED;
6490 	else
6491 		status = MGMT_STATUS_FAILED;
6492 
6493 	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6494 
6495 	mgmt_pending_remove(cmd);
6496 }
6497 
/* Timer callback for the discoverable timeout: clear the discoverable
 * flags, drop inquiry scan (keeping page scan if BR/EDR is enabled),
 * refresh class/advertising data and announce the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
6528 
/* Emit the New Link Key event for a BR/EDR link key. @persistent becomes
 * the store hint telling userspace whether to persist the key.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
6545 
6546 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6547 {
6548 	switch (ltk->type) {
6549 	case SMP_LTK:
6550 	case SMP_LTK_SLAVE:
6551 		if (ltk->authenticated)
6552 			return MGMT_LTK_AUTHENTICATED;
6553 		return MGMT_LTK_UNAUTHENTICATED;
6554 	case SMP_LTK_P256:
6555 		if (ltk->authenticated)
6556 			return MGMT_LTK_P256_AUTH;
6557 		return MGMT_LTK_P256_UNAUTH;
6558 	case SMP_LTK_P256_DEBUG:
6559 		return MGMT_LTK_P256_DEBUG;
6560 	}
6561 
6562 	return MGMT_LTK_UNAUTHENTICATED;
6563 }
6564 
/* Emit the New Long Term Key event for an LE LTK, deciding whether
 * userspace should persist the key.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
6602 
/* Send a New IRK event so user space can store the identity resolving
 * key that was just distributed by a remote device.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will also be loaded on next boot of the system. More
	 * identity resolving keys means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
6632 
6633 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6634 		   bool persistent)
6635 {
6636 	struct mgmt_ev_new_csrk ev;
6637 
6638 	memset(&ev, 0, sizeof(ev));
6639 
6640 	/* Devices using resolvable or non-resolvable random addresses
6641 	 * without providing an indentity resolving key don't require
6642 	 * to store signature resolving keys. Their addresses will change
6643 	 * the next time around.
6644 	 *
6645 	 * Only when a remote device provides an identity address
6646 	 * make sure the signature resolving key is stored. So allow
6647 	 * static random and public addresses here.
6648 	 */
6649 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6650 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6651 		ev.store_hint = 0x00;
6652 	else
6653 		ev.store_hint = persistent;
6654 
6655 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6656 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6657 	ev.key.master = csrk->master;
6658 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6659 
6660 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6661 }
6662 
6663 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6664 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6665 			 u16 max_interval, u16 latency, u16 timeout)
6666 {
6667 	struct mgmt_ev_new_conn_param ev;
6668 
6669 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
6670 		return;
6671 
6672 	memset(&ev, 0, sizeof(ev));
6673 	bacpy(&ev.addr.bdaddr, bdaddr);
6674 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6675 	ev.store_hint = store_hint;
6676 	ev.min_interval = cpu_to_le16(min_interval);
6677 	ev.max_interval = cpu_to_le16(max_interval);
6678 	ev.latency = cpu_to_le16(latency);
6679 	ev.timeout = cpu_to_le16(timeout);
6680 
6681 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6682 }
6683 
6684 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6685 				  u8 data_len)
6686 {
6687 	eir[eir_len++] = sizeof(type) + data_len;
6688 	eir[eir_len++] = type;
6689 	memcpy(&eir[eir_len], data, data_len);
6690 	eir_len += data_len;
6691 
6692 	return eir_len;
6693 }
6694 
/* Send the Device Connected mgmt event, packing either the LE
 * advertising data or the BR/EDR name/class as EIR fields.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		/* BR/EDR (or no adv data): append the complete name and,
		 * when non-zero, the class of device as EIR fields.
		 */
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
6731 
6732 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6733 {
6734 	struct sock **sk = data;
6735 
6736 	cmd->cmd_complete(cmd, 0);
6737 
6738 	*sk = cmd->sk;
6739 	sock_hold(*sk);
6740 
6741 	mgmt_pending_remove(cmd);
6742 }
6743 
/* Complete one pending Unpair Device command: emit the Device
 * Unpaired event for its address and finish the command with success.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
6754 
6755 bool mgmt_powering_down(struct hci_dev *hdev)
6756 {
6757 	struct pending_cmd *cmd;
6758 	struct mgmt_mode *cp;
6759 
6760 	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6761 	if (!cmd)
6762 		return false;
6763 
6764 	cp = cmd->param;
6765 	if (!cp->val)
6766 		return true;
6767 
6768 	return false;
6769 }
6770 
/* Send the Device Disconnected mgmt event and complete any pending
 * Disconnect and Unpair Device commands waiting on this link.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* No event if the connection was never reported over mgmt */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp leaves a referenced socket in sk; it is then
	 * passed to mgmt_event below — presumably so the event is not
	 * echoed back to the command's own socket (see mgmt_event).
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6806 
6807 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6808 			    u8 link_type, u8 addr_type, u8 status)
6809 {
6810 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6811 	struct mgmt_cp_disconnect *cp;
6812 	struct pending_cmd *cmd;
6813 
6814 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6815 			     hdev);
6816 
6817 	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6818 	if (!cmd)
6819 		return;
6820 
6821 	cp = cmd->param;
6822 
6823 	if (bacmp(bdaddr, &cp->addr.bdaddr))
6824 		return;
6825 
6826 	if (cp->addr.type != bdaddr_type)
6827 		return;
6828 
6829 	cmd->cmd_complete(cmd, mgmt_status(status));
6830 	mgmt_pending_remove(cmd);
6831 }
6832 
/* Send the Connect Failed mgmt event, expediting a pending power off
 * if this was the last remaining connection.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
6852 
6853 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6854 {
6855 	struct mgmt_ev_pin_code_request ev;
6856 
6857 	bacpy(&ev.addr.bdaddr, bdaddr);
6858 	ev.addr.type = BDADDR_BREDR;
6859 	ev.secure = secure;
6860 
6861 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6862 }
6863 
6864 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6865 				  u8 status)
6866 {
6867 	struct pending_cmd *cmd;
6868 
6869 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6870 	if (!cmd)
6871 		return;
6872 
6873 	cmd->cmd_complete(cmd, mgmt_status(status));
6874 	mgmt_pending_remove(cmd);
6875 }
6876 
6877 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6878 				      u8 status)
6879 {
6880 	struct pending_cmd *cmd;
6881 
6882 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6883 	if (!cmd)
6884 		return;
6885 
6886 	cmd->cmd_complete(cmd, mgmt_status(status));
6887 	mgmt_pending_remove(cmd);
6888 }
6889 
6890 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6891 			      u8 link_type, u8 addr_type, u32 value,
6892 			      u8 confirm_hint)
6893 {
6894 	struct mgmt_ev_user_confirm_request ev;
6895 
6896 	BT_DBG("%s", hdev->name);
6897 
6898 	bacpy(&ev.addr.bdaddr, bdaddr);
6899 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
6900 	ev.confirm_hint = confirm_hint;
6901 	ev.value = cpu_to_le32(value);
6902 
6903 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6904 			  NULL);
6905 }
6906 
6907 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6908 			      u8 link_type, u8 addr_type)
6909 {
6910 	struct mgmt_ev_user_passkey_request ev;
6911 
6912 	BT_DBG("%s", hdev->name);
6913 
6914 	bacpy(&ev.addr.bdaddr, bdaddr);
6915 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
6916 
6917 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6918 			  NULL);
6919 }
6920 
6921 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6922 				      u8 link_type, u8 addr_type, u8 status,
6923 				      u8 opcode)
6924 {
6925 	struct pending_cmd *cmd;
6926 
6927 	cmd = mgmt_pending_find(opcode, hdev);
6928 	if (!cmd)
6929 		return -ENOENT;
6930 
6931 	cmd->cmd_complete(cmd, mgmt_status(status));
6932 	mgmt_pending_remove(cmd);
6933 
6934 	return 0;
6935 }
6936 
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
6943 
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
6951 
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
6958 
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
6966 
6967 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6968 			     u8 link_type, u8 addr_type, u32 passkey,
6969 			     u8 entered)
6970 {
6971 	struct mgmt_ev_passkey_notify ev;
6972 
6973 	BT_DBG("%s", hdev->name);
6974 
6975 	bacpy(&ev.addr.bdaddr, bdaddr);
6976 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
6977 	ev.passkey = __cpu_to_le32(passkey);
6978 	ev.entered = entered;
6979 
6980 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6981 }
6982 
/* Send the Auth Failed mgmt event and complete a pending pairing
 * command for this connection, if any.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* The event must be sent before the command is completed and
	 * removed below, since cmd->sk is still needed here.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
7003 
/* Handle completion of an HCI authentication enable change that was
 * triggered by the Set Link Security mgmt command.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* Fail all pending Set Link Security commands */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Sync the LINK_SECURITY dev flag with the controller's HCI_AUTH
	 * state; changed is true only on an actual transition.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7032 
7033 static void clear_eir(struct hci_request *req)
7034 {
7035 	struct hci_dev *hdev = req->hdev;
7036 	struct hci_cp_write_eir cp;
7037 
7038 	if (!lmp_ext_inq_capable(hdev))
7039 		return;
7040 
7041 	memset(hdev->eir, 0, sizeof(hdev->eir));
7042 
7043 	memset(&cp, 0, sizeof(cp));
7044 
7045 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7046 }
7047 
/* Handle completion of an HCI Simple Pairing Mode change triggered by
 * the Set SSP mgmt command, keeping the SSP/HS dev flags and the EIR
 * data in sync with the controller state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back an optimistically set SSP flag; HS depends
		 * on SSP and is cleared along with it.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	/* Track whether the settings actually changed; disabling SSP
	 * also disables HS, and either transition counts as a change.
	 */
	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Update (or clear) the EIR data to match the new SSP state */
	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7100 
7101 static void sk_lookup(struct pending_cmd *cmd, void *data)
7102 {
7103 	struct cmd_lookup *match = data;
7104 
7105 	if (match->sk == NULL) {
7106 		match->sk = cmd->sk;
7107 		sock_hold(match->sk);
7108 	}
7109 }
7110 
7111 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7112 				    u8 status)
7113 {
7114 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7115 
7116 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7117 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7118 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7119 
7120 	if (!status)
7121 		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
7122 			   NULL);
7123 
7124 	if (match.sk)
7125 		sock_put(match.sk);
7126 }
7127 
/* Handle completion of an HCI local name change and emit the Local
 * Name Changed event when appropriate.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name change did not come from mgmt; cache it locally */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
7154 
/* Complete a pending Read Local OOB Data command with the P-192
 * (and, when Secure Connections is enabled, P-256) hash/randomizer
 * values received from the controller.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		struct mgmt_rp_read_local_oob_data rp;
		size_t rp_size = sizeof(rp);

		memcpy(rp.hash192, hash192, sizeof(rp.hash192));
		memcpy(rp.rand192, rand192, sizeof(rp.rand192));

		if (bredr_sc_enabled(hdev) && hash256 && rand256) {
			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.rand256, rand256, sizeof(rp.rand256));
		} else {
			/* Without P-256 values the reply is truncated to
			 * just the P-192 fields.
			 */
			rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
		}

		cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0,
			     &rp, rp_size);
	}

	mgmt_pending_remove(cmd);
}
7190 
7191 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7192 {
7193 	int i;
7194 
7195 	for (i = 0; i < uuid_count; i++) {
7196 		if (!memcmp(uuid, uuids[i], 16))
7197 			return true;
7198 	}
7199 
7200 	return false;
7201 }
7202 
/* Walk the length-prefixed EIR/advertising fields in @eir and return
 * true if any contained UUID (16, 32 or 128 bit) matches one of the
 * @uuid_count entries in @uuids (given as 128-bit values).
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop if the field would run past the buffer end */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Expand each little-endian 16-bit UUID into a
			 * full 128-bit UUID (bytes 12-13 of the
			 * Bluetooth base UUID) before comparing.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Same expansion for 32-bit UUIDs (bytes 12-15) */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared as-is */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + contents) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7257 
/* Schedule a restart of LE scanning so that devices suppressed by the
 * controller's duplicate filtering get reported again.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return;

	/* Skip the restart when the remaining scan time is shorter
	 * than the restart delay — the scan would already be over
	 * before the delayed work could fire.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
7272 
/* Report a discovered device to user space via the Device Found mgmt
 * event, applying the RSSI threshold and UUID filters configured for
 * service discovery before anything is sent.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;
	bool match;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* When using service discovery with a RSSI threshold, then check
	 * if such a RSSI threshold is specified. If a RSSI threshold has
	 * been specified, and HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set,
	 * then all results with a RSSI smaller than the RSSI threshold will be
	 * dropped. If the quirk is set, let it through for further processing,
	 * as we might need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped. In case there is a match the result is
		 * kept and checking possible scan response data
		 * will be skipped.
		 */
		if (hdev->discovery.uuid_count > 0) {
			match = eir_has_uuids(eir, eir_len,
					      hdev->discovery.uuid_count,
					      hdev->discovery.uuids);
			/* If duplicate filtering does not report RSSI changes,
			 * then restart scanning to ensure updated result with
			 * updated RSSI values.
			 */
			if (match && test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
					      &hdev->quirks))
				restart_le_scan(hdev);
		} else {
			match = true;
		}

		if (!match && !scan_rsp_len)
			return;

		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty EIR or advertising data
		 * should be dropped since they do not match any UUID.
		 */
		if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
			return;

		match = false;
	}

	/* Append the class of device as an EIR field if one was given
	 * and the data does not already carry one.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped if there is no previous match from the
		 * advertising data.
		 */
		if (hdev->discovery.uuid_count > 0) {
			if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
						     hdev->discovery.uuid_count,
						     hdev->discovery.uuids))
				return;

			/* If duplicate filtering does not report RSSI changes,
			 * then restart scanning to ensure updated result with
			 * updated RSSI values.
			 */
			if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
				     &hdev->quirks))
				restart_le_scan(hdev);
		}

		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty scan response and no
		 * previous matched advertising data should be dropped.
		 */
		if (hdev->discovery.uuid_count > 0 && !match)
			return;
	}

	/* Validate the reported RSSI value against the RSSI threshold once more
	 * incase HCI_QUIRK_STRICT_DUPLICATE_FILTER forced a restart of LE
	 * scanning.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    rssi < hdev->discovery.rssi)
		return;

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7421 
7422 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7423 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7424 {
7425 	struct mgmt_ev_device_found *ev;
7426 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7427 	u16 eir_len;
7428 
7429 	ev = (struct mgmt_ev_device_found *) buf;
7430 
7431 	memset(buf, 0, sizeof(buf));
7432 
7433 	bacpy(&ev->addr.bdaddr, bdaddr);
7434 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
7435 	ev->rssi = rssi;
7436 
7437 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7438 				  name_len);
7439 
7440 	ev->eir_len = cpu_to_le16(eir_len);
7441 
7442 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7443 }
7444 
7445 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7446 {
7447 	struct mgmt_ev_discovering ev;
7448 
7449 	BT_DBG("%s discovering %u", hdev->name, discovering);
7450 
7451 	memset(&ev, 0, sizeof(ev));
7452 	ev.type = hdev->discovery.type;
7453 	ev.discovering = discovering;
7454 
7455 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7456 }
7457 
/* Request-complete callback for re-enabling advertising; only logs
 * the status since there is nothing further to do here.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}
7462 
/* Re-enable advertising (e.g. after it was implicitly stopped) if the
 * HCI_ADVERTISING setting says it should be on.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request *req;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
}
7474