xref: /openbmc/linux/net/bluetooth/mgmt.c (revision 206204a1)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34 
35 #include "smp.h"
36 
37 #define MGMT_VERSION	1
38 #define MGMT_REVISION	6
39 
/* Mgmt command opcodes implemented by this kernel; reported verbatim
 * to user space by the MGMT_OP_READ_COMMANDS handler (read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
};
89 
/* Mgmt event opcodes this kernel can emit; reported verbatim to user
 * space by the MGMT_OP_READ_COMMANDS handler (read_commands()).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
};
115 
/* How long the service cache stays valid before it is flushed */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* Powered from the mgmt point of view: the device is up and not merely
 * in the transient auto-power-on state used during controller setup.
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

/* Book-keeping for a mgmt command whose reply is deferred until the
 * matching HCI transaction completes. Linked on hdev->mgmt_pending.
 */
struct pending_cmd {
	struct list_head list;	/* node in hdev->mgmt_pending */
	u16 opcode;		/* mgmt opcode being processed */
	int index;		/* controller index (hdev->id) */
	void *param;		/* kmalloc'ed copy of the command parameters */
	struct sock *sk;	/* originating socket; a reference is held */
	void *user_data;	/* per-command private context */
};
129 
130 /* HCI to MGMT error code conversion table */
131 static u8 mgmt_status_table[] = {
132 	MGMT_STATUS_SUCCESS,
133 	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
134 	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
135 	MGMT_STATUS_FAILED,		/* Hardware Failure */
136 	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
137 	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
138 	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
139 	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
140 	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
141 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
142 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
143 	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
144 	MGMT_STATUS_BUSY,		/* Command Disallowed */
145 	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
146 	MGMT_STATUS_REJECTED,		/* Rejected Security */
147 	MGMT_STATUS_REJECTED,		/* Rejected Personal */
148 	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
149 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
150 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
151 	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
152 	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
153 	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
154 	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
155 	MGMT_STATUS_BUSY,		/* Repeated Attempts */
156 	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
157 	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
158 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
159 	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
160 	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
161 	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
162 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
163 	MGMT_STATUS_FAILED,		/* Unspecified Error */
164 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
165 	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
166 	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
167 	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
168 	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
169 	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
170 	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
171 	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
172 	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
173 	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
174 	MGMT_STATUS_FAILED,		/* Transaction Collision */
175 	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
176 	MGMT_STATUS_REJECTED,		/* QoS Rejected */
177 	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
178 	MGMT_STATUS_REJECTED,		/* Insufficient Security */
179 	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
180 	MGMT_STATUS_BUSY,		/* Role Switch Pending */
181 	MGMT_STATUS_FAILED,		/* Slot Violation */
182 	MGMT_STATUS_FAILED,		/* Role Switch Failed */
183 	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
184 	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
185 	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
186 	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
187 	MGMT_STATUS_BUSY,		/* Controller Busy */
188 	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
189 	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
190 	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
191 	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
192 	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
193 };
194 
195 static u8 mgmt_status(u8 hci_status)
196 {
197 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
198 		return mgmt_status_table[hci_status];
199 
200 	return MGMT_STATUS_FAILED;
201 }
202 
/* Send a MGMT_EV_CMD_STATUS event for command @cmd with @status back
 * to the socket that issued the command.
 *
 * Returns 0 on success or a negative errno (e.g. -ENOMEM).
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);	/* not queued, so we still own the skb */

	return err;
}
232 
/* Send a MGMT_EV_CMD_COMPLETE event for command @cmd back to the
 * issuing socket, with @rp_len bytes of return parameters from @rp
 * (which may be NULL when rp_len is 0).
 *
 * Returns 0 on success or a negative errno (e.g. -ENOMEM).
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);	/* not queued, so we still own the skb */

	return err;
}
266 
267 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
268 			u16 data_len)
269 {
270 	struct mgmt_rp_read_version rp;
271 
272 	BT_DBG("sock %p", sk);
273 
274 	rp.version = MGMT_VERSION;
275 	rp.revision = cpu_to_le16(MGMT_REVISION);
276 
277 	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
278 			    sizeof(rp));
279 }
280 
/* MGMT_OP_READ_COMMANDS handler: return the lists of supported mgmt
 * command and event opcodes, each encoded as little-endian u16.
 *
 * Returns 0 on success or a negative errno (e.g. -ENOMEM).
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Commands first, then events; put_unaligned_le16() because the
	 * opcode array in the reply need not be u16-aligned.
	 */
	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}
314 
/* MGMT_OP_READ_INDEX_LIST handler: return the ids of all registered
 * BR/EDR controllers that are visible to mgmt.
 *
 * Returns 0 on success or a negative errno (e.g. -ENOMEM).
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation size */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: we are holding hci_dev_list_lock (a rwlock) */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the ids, skipping controllers that are
	 * still in setup or bound to a user channel, so the final count
	 * may be smaller than the one used for the allocation.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);	/* recompute for actual count */

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
367 
/* Build the MGMT_SETTING_* bitmask of settings this controller could
 * support, derived from its LMP/LE feature bits.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Available on every controller */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_PAIRABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;

	if (lmp_bredr_capable(hdev)) {
		settings |= MGMT_SETTING_CONNECTABLE;
		/* Fast connectable (interlaced page scan) needs 1.2+ */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_DISCOVERABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		/* HCI_FORCE_SC lets debugfs force-enable Secure Connections */
		if (lmp_sc_capable(hdev) ||
		    test_bit(HCI_FORCE_SC, &hdev->dev_flags))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_PRIVACY;
	}

	return settings;
}
402 
/* Build the MGMT_SETTING_* bitmask of settings that are currently
 * active, derived from the hdev flag bits.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_PAIRABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
		settings |= MGMT_SETTING_PRIVACY;

	return settings;
}
451 
/* PnP Information service class; excluded from the EIR UUID list */
#define PNP_INFO_SVCLASS_ID		0x1200

/* Append an EIR field listing the device's 16-bit service UUIDs to
 * @data, writing at most @len bytes. If not every UUID fits, the field
 * type is downgraded from EIR_UUID16_ALL to EIR_UUID16_SOME.
 *
 * Returns a pointer just past the last byte written (== @data when
 * nothing was written).
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* 16-bit value sits at bytes 12-13 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		/* Values below 0x1100 are skipped (not service classes) */
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
495 
/* Append an EIR field listing the device's 32-bit service UUIDs to
 * @data, writing at most @len bytes. If not every UUID fits, the field
 * type is downgraded from EIR_UUID32_ALL to EIR_UUID32_SOME.
 *
 * Returns a pointer just past the last byte written.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the field header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* 32-bit value sits at bytes 12-15 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
528 
/* Append an EIR field listing the device's 128-bit service UUIDs to
 * @data, writing at most @len bytes. If not every UUID fits, the field
 * type is downgraded from EIR_UUID128_ALL to EIR_UUID128_SOME.
 *
 * Returns a pointer just past the last byte written.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the field header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
561 
562 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
563 {
564 	struct pending_cmd *cmd;
565 
566 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
567 		if (cmd->opcode == opcode)
568 			return cmd;
569 	}
570 
571 	return NULL;
572 }
573 
/* Build LE scan response data for @hdev into @ptr, currently holding
 * just the local name (shortened with EIR_NAME_SHORT if it does not
 * fit in HCI_MAX_AD_LENGTH).
 *
 * Returns the number of bytes written.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve 2 bytes for the field length and type */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* Field length covers the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
599 
/* Queue an HCI command updating the LE scan response data, but only
 * when LE is enabled and the data actually changed since the last
 * update (hdev->scan_rsp_data is the cached copy).
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Nothing to do if the controller already has this data */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
624 
/* Compute the LE advertising flags (LE_AD_GENERAL/LE_AD_LIMITED) that
 * reflect the discoverable state the device is heading for.
 *
 * Returns 0 when the device is (or will be) non-discoverable.
 */
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		/* 0x01 = general discoverable, 0x02 = limited */
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_LIMITED;
		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_GENERAL;
	}

	return 0;
}
648 
/* Build LE advertising data for @hdev into @ptr: a flags field (when
 * any flag is set) followed by a TX power field (when the controller
 * reported a valid advertising TX power).
 *
 * Returns the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;		/* field length: type + flags byte */
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;		/* field length: type + power byte */
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
680 
/* Queue an HCI command updating the LE advertising data, but only
 * when LE is enabled and the data actually changed since the last
 * update (hdev->adv_data is the cached copy).
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Nothing to do if the controller already has this data */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
705 
/* Build extended inquiry response data for @hdev into @data: local
 * name, inquiry TX power, Device ID and the three UUID lists. The
 * buffer is assumed to be HCI_MAX_EIR_LENGTH bytes (see the remaining
 * length passed to the uuid list helpers).
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;		/* field length: type + power byte */
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* devid_source > 0 means a Device ID record was configured */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;		/* field length: type + 4x u16 */
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
753 
/* Queue an HCI Write Extended Inquiry Response command when EIR is
 * applicable (powered, controller supports extended inquiry, SSP on,
 * service cache not active) and the data changed since the cached
 * copy in hdev->eir.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Nothing to do if the controller already has this data */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
782 
783 static u8 get_service_classes(struct hci_dev *hdev)
784 {
785 	struct bt_uuid *uuid;
786 	u8 val = 0;
787 
788 	list_for_each_entry(uuid, &hdev->uuids, list)
789 		val |= uuid->svc_hint;
790 
791 	return val;
792 }
793 
/* Queue an HCI Write Class of Device command when applicable (powered,
 * BR/EDR enabled, service cache not active) and the computed class
 * differs from the cached hdev->dev_class.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* 0x20 in the major class byte marks limited discoverable mode */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Nothing to do if the controller already has this class */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
822 
823 static bool get_connectable(struct hci_dev *hdev)
824 {
825 	struct pending_cmd *cmd;
826 
827 	/* If there's a pending mgmt command the flag will not yet have
828 	 * it's final value, so check for this first.
829 	 */
830 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
831 	if (cmd) {
832 		struct mgmt_mode *cp = cmd->param;
833 		return cp->val;
834 	}
835 
836 	return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
837 }
838 
/* Queue the HCI commands that configure and enable LE advertising,
 * choosing connectable vs. non-connectable advertising based on the
 * (possibly still pending) connectable setting.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	/* 0x0800 * 0.625ms = 1.28s advertising interval */
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
873 
874 static void disable_advertising(struct hci_request *req)
875 {
876 	u8 enable = 0x00;
877 
878 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
879 }
880 
/* Delayed work: flush the service cache by pushing the now-final EIR
 * and Class of Device values to the controller (see CACHE_TIMEOUT).
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* test_and_clear: only one flush per cache period */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
901 
/* Delayed work: the resolvable private address timed out. Mark it
 * expired and, when advertising and no LE connection is up, restart
 * advertising so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* With an active LE connection or no advertising there is
	 * nothing to refresh right now.
	 */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
927 
/* One-time initialization when a controller first comes under mgmt
 * control; subsequent calls are no-ops thanks to test_and_set_bit.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
943 
/* MGMT_OP_READ_INFO handler: report address, version, manufacturer,
 * settings, device class and names of @hdev.
 *
 * Returns 0 on success or a negative errno from cmd_complete().
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
973 
974 static void mgmt_pending_free(struct pending_cmd *cmd)
975 {
976 	sock_put(cmd->sk);
977 	kfree(cmd->param);
978 	kfree(cmd);
979 }
980 
/* Allocate a pending command entry for @opcode, copy @len bytes of
 * command parameters from @data (may be NULL) and queue it on
 * hdev->mgmt_pending. Takes a reference on @sk, released by
 * mgmt_pending_free().
 *
 * Returns the new entry or NULL on allocation failure.
 */
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	if (data)
		memcpy(cmd->param, data, len);

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}
1010 
1011 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1012 				 void (*cb)(struct pending_cmd *cmd,
1013 					    void *data),
1014 				 void *data)
1015 {
1016 	struct pending_cmd *cmd, *tmp;
1017 
1018 	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1019 		if (opcode > 0 && cmd->opcode != opcode)
1020 			continue;
1021 
1022 		cb(cmd, data);
1023 	}
1024 }
1025 
/* Unlink a pending command from hdev->mgmt_pending and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1031 
1032 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1033 {
1034 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1035 
1036 	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1037 			    sizeof(settings));
1038 }
1039 
/* Completion callback for clean_up_hci_state(): once no connections
 * remain, schedule the actual power off immediately.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1049 
/* Queue the HCI commands needed to wind the controller down before
 * powering off: disable page/inquiry scan, stop advertising and LE
 * scanning, and disconnect/cancel/reject every connection depending
 * on its state.
 *
 * Returns the result of hci_req_run() (-ENODATA when nothing was
 * queued).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
		hci_req_add_le_scan_disable(&req);
	}

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established links: plain disconnect */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing attempts: cancel the connection */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming requests: reject them */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1104 
/* MGMT_OP_SET_POWERED handler: power the controller up or down. The
 * reply is deferred via a pending_cmd until the power transition
 * completes.
 *
 * Returns 0 on success or a negative errno.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power transition at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Device still in the transient auto-power-on state: cancel the
	 * pending auto power off and, when powering on, just finalize
	 * the already powered state.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: reply immediately */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1170 
1171 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1172 		      struct sock *skip_sk)
1173 {
1174 	struct sk_buff *skb;
1175 	struct mgmt_hdr *hdr;
1176 
1177 	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1178 	if (!skb)
1179 		return -ENOMEM;
1180 
1181 	hdr = (void *) skb_put(skb, sizeof(*hdr));
1182 	hdr->opcode = cpu_to_le16(event);
1183 	if (hdev)
1184 		hdr->index = cpu_to_le16(hdev->id);
1185 	else
1186 		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1187 	hdr->len = cpu_to_le16(data_len);
1188 
1189 	if (data)
1190 		memcpy(skb_put(skb, data_len), data, data_len);
1191 
1192 	/* Time stamp */
1193 	__net_timestamp(skb);
1194 
1195 	hci_send_to_control(skb, skip_sk);
1196 	kfree_skb(skb);
1197 
1198 	return 0;
1199 }
1200 
1201 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1202 {
1203 	__le32 ev;
1204 
1205 	ev = cpu_to_le32(get_current_settings(hdev));
1206 
1207 	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1208 }
1209 
/* Context passed to mgmt_pending_foreach() callbacks: carries the
 * device being worked on, remembers the first responding socket (so a
 * later event broadcast can skip it) and an optional mgmt status code.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1215 
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings and free it. The first command's socket is stashed
 * in the cmd_lookup (with a held reference, released by the caller) so
 * that socket can be skipped when broadcasting New Settings afterwards.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1231 
1232 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1233 {
1234 	u8 *status = data;
1235 
1236 	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1237 	mgmt_pending_remove(cmd);
1238 }
1239 
1240 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1241 {
1242 	if (!lmp_bredr_capable(hdev))
1243 		return MGMT_STATUS_NOT_SUPPORTED;
1244 	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1245 		return MGMT_STATUS_REJECTED;
1246 	else
1247 		return MGMT_STATUS_SUCCESS;
1248 }
1249 
1250 static u8 mgmt_le_support(struct hci_dev *hdev)
1251 {
1252 	if (!lmp_le_capable(hdev))
1253 		return MGMT_STATUS_NOT_SUPPORTED;
1254 	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1255 		return MGMT_STATUS_REJECTED;
1256 	else
1257 		return MGMT_STATUS_SUCCESS;
1258 }
1259 
/* HCI request completion handler for Set Discoverable: update the
 * HCI_DISCOVERABLE flag, arm the discoverable timeout when one was
 * requested, reply to the pending command and refresh the class of
 * device so the limited discoverable bit stays accurate.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* The limited discoverable bit was set up front in
		 * set_discoverable(); roll it back on failure.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timer that turns discoverable back off */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1316 
/* Handle the MGMT Set Discoverable command. val may be 0x00 (off),
 * 0x01 (general discoverable) or 0x02 (limited discoverable, which
 * requires a non-zero timeout). For BR/EDR, inquiry scan and the
 * current IAC LAPs are updated; for LE-only controllers only the
 * advertising data needs refreshing. The discoverable timeout itself
 * is armed in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while the controller is off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable mode is only allowed while connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* Powered off: just toggle the host flag, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1481 
1482 static void write_fast_connectable(struct hci_request *req, bool enable)
1483 {
1484 	struct hci_dev *hdev = req->hdev;
1485 	struct hci_cp_write_page_scan_activity acp;
1486 	u8 type;
1487 
1488 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1489 		return;
1490 
1491 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1492 		return;
1493 
1494 	if (enable) {
1495 		type = PAGE_SCAN_TYPE_INTERLACED;
1496 
1497 		/* 160 msec page scan interval */
1498 		acp.interval = cpu_to_le16(0x0100);
1499 	} else {
1500 		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
1501 
1502 		/* default 1.28 sec page scan */
1503 		acp.interval = cpu_to_le16(0x0800);
1504 	}
1505 
1506 	acp.window = cpu_to_le16(0x0012);
1507 
1508 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1509 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
1510 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1511 			    sizeof(acp), &acp);
1512 
1513 	if (hdev->page_scan_type != type)
1514 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1515 }
1516 
1517 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1518 {
1519 	struct pending_cmd *cmd;
1520 	struct mgmt_mode *cp;
1521 	bool changed;
1522 
1523 	BT_DBG("status 0x%02x", status);
1524 
1525 	hci_dev_lock(hdev);
1526 
1527 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1528 	if (!cmd)
1529 		goto unlock;
1530 
1531 	if (status) {
1532 		u8 mgmt_err = mgmt_status(status);
1533 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1534 		goto remove_cmd;
1535 	}
1536 
1537 	cp = cmd->param;
1538 	if (cp->val)
1539 		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1540 	else
1541 		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1542 
1543 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1544 
1545 	if (changed)
1546 		new_settings(hdev, cmd->sk);
1547 
1548 remove_cmd:
1549 	mgmt_pending_remove(cmd);
1550 
1551 unlock:
1552 	hci_dev_unlock(hdev);
1553 }
1554 
1555 static int set_connectable_update_settings(struct hci_dev *hdev,
1556 					   struct sock *sk, u8 val)
1557 {
1558 	bool changed = false;
1559 	int err;
1560 
1561 	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1562 		changed = true;
1563 
1564 	if (val) {
1565 		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1566 	} else {
1567 		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1568 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1569 	}
1570 
1571 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1572 	if (err < 0)
1573 		return err;
1574 
1575 	if (changed)
1576 		return new_settings(hdev, sk);
1577 
1578 	return 0;
1579 }
1580 
/* Handle the MGMT Set Connectable command. When powered off only the
 * host flags are updated; otherwise page scan (BR/EDR) and/or the LE
 * advertising state are adjusted through an HCI request.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Leaving connectable mode also ends
			 * discoverable mode, so stop any pending
			 * discoverable timeout.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising, presumably so its parameters reflect
	 * the new connectable state; only when no LE connection is up.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA means no HCI commands were queued; apply
		 * the setting purely on the host side instead.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1675 
1676 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1677 			u16 len)
1678 {
1679 	struct mgmt_mode *cp = data;
1680 	bool changed;
1681 	int err;
1682 
1683 	BT_DBG("request for %s", hdev->name);
1684 
1685 	if (cp->val != 0x00 && cp->val != 0x01)
1686 		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1687 				  MGMT_STATUS_INVALID_PARAMS);
1688 
1689 	hci_dev_lock(hdev);
1690 
1691 	if (cp->val)
1692 		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1693 	else
1694 		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1695 
1696 	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1697 	if (err < 0)
1698 		goto unlock;
1699 
1700 	if (changed)
1701 		err = new_settings(hdev, sk);
1702 
1703 unlock:
1704 	hci_dev_unlock(hdev);
1705 	return err;
1706 }
1707 
/* Handle the MGMT Set Link Security command: control BR/EDR link level
 * security through HCI Write Auth Enable. When the controller is
 * powered off only the HCI_LINK_SECURITY host flag is toggled.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1777 
/* Handle the MGMT Set SSP command: enable or disable Secure Simple
 * Pairing via HCI Write Simple Pairing Mode. Disabling SSP also clears
 * High Speed, which requires SSP (see set_hs()). When powered off only
 * the host flags are updated.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Clearing SSP also clears HS; "changed" must be
			 * true if either flag was actually set before.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just reply with settings */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1855 
/* Handle the MGMT Set HS (High Speed) command. This is a host-side
 * flag only; enabling requires SSP to be enabled, and disabling is
 * rejected while the controller is powered on.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* High Speed depends on SSP being enabled */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1906 
/* HCI request completion handler for Set LE: respond to all pending
 * Set LE commands (with an error status or the new settings) and, on
 * success, broadcast New Settings and refresh the LE advertising and
 * scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Release the socket reference taken by settings_rsp() */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
1944 
/* Handle the MGMT Set LE command: toggle LE host support via HCI Write
 * LE Host Supported. Only possible on dual-mode controllers - LE-only
 * devices cannot switch LE off. Disabling LE also stops any active
 * advertising.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Powered off, or the controller already matches the request:
	 * only the host flags need updating, no HCI traffic.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Turning LE off also implicitly drops advertising */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Advertising must be stopped before LE support goes away */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2033 
2034 /* This is a helper function to test for pending mgmt commands that can
2035  * cause CoD or EIR HCI commands. We can only allow one such pending
2036  * mgmt command at a time since otherwise we cannot easily track what
2037  * the current values are, will be, and based on that calculate if a new
2038  * HCI command needs to be sent and if yes with what value.
2039  */
2040 static bool pending_eir_or_class(struct hci_dev *hdev)
2041 {
2042 	struct pending_cmd *cmd;
2043 
2044 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2045 		switch (cmd->opcode) {
2046 		case MGMT_OP_ADD_UUID:
2047 		case MGMT_OP_REMOVE_UUID:
2048 		case MGMT_OP_SET_DEV_CLASS:
2049 		case MGMT_OP_SET_POWERED:
2050 			return true;
2051 		}
2052 	}
2053 
2054 	return false;
2055 }
2056 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used by get_uuid_size() to detect 16/32
 * bit shortened UUIDs.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2061 
2062 static u8 get_uuid_size(const u8 *uuid)
2063 {
2064 	u32 val;
2065 
2066 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2067 		return 128;
2068 
2069 	val = get_unaligned_le32(&uuid[12]);
2070 	if (val > 0xffff)
2071 		return 32;
2072 
2073 	return 16;
2074 }
2075 
2076 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2077 {
2078 	struct pending_cmd *cmd;
2079 
2080 	hci_dev_lock(hdev);
2081 
2082 	cmd = mgmt_pending_find(mgmt_op, hdev);
2083 	if (!cmd)
2084 		goto unlock;
2085 
2086 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2087 		     hdev->dev_class, 3);
2088 
2089 	mgmt_pending_remove(cmd);
2090 
2091 unlock:
2092 	hci_dev_unlock(hdev);
2093 }
2094 
/* HCI request completion callback for Add UUID */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2101 
/* Handle the MGMT Add UUID command: record the new service UUID and
 * update the class of device and EIR data accordingly. When no HCI
 * commands turned out to be needed (-ENODATA), the command completes
 * immediately with the current class of device.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA: nothing needed to be sent, complete now */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2159 
2160 static bool enable_service_cache(struct hci_dev *hdev)
2161 {
2162 	if (!hdev_is_powered(hdev))
2163 		return false;
2164 
2165 	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2166 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2167 				   CACHE_TIMEOUT);
2168 		return true;
2169 	}
2170 
2171 	return false;
2172 }
2173 
/* HCI request completion callback for Remove UUID */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2180 
/* Handle the MGMT Remove UUID command: delete a single service UUID,
 * or all of them when the all-zeroes wildcard UUID is given, and then
 * refresh the class of device and EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zeroes UUID acts as a "remove everything" wildcard */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer could be armed, defer the
		 * HCI updates to when it expires and complete now.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	/* Removing an unknown UUID is an error */
	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: nothing needed to be sent, complete now */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2258 
/* HCI request completion callback for Set Device Class */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2265 
/* Handle the MGMT Set Device Class command: update the major/minor
 * class of device. The two low bits of minor and three high bits of
 * major are reserved and must be zero. When powered off only the
 * stored values are updated; the HCI write happens at power on.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* NOTE(review): the lock is dropped around the
		 * synchronous cancel, presumably because the
		 * service_cache work takes hci_dev_lock itself -
		 * confirm against the work handler.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: nothing needed to be sent, complete now */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2336 
2337 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2338 			  u16 len)
2339 {
2340 	struct mgmt_cp_load_link_keys *cp = data;
2341 	u16 key_count, expected_len;
2342 	bool changed;
2343 	int i;
2344 
2345 	BT_DBG("request for %s", hdev->name);
2346 
2347 	if (!lmp_bredr_capable(hdev))
2348 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2349 				  MGMT_STATUS_NOT_SUPPORTED);
2350 
2351 	key_count = __le16_to_cpu(cp->key_count);
2352 
2353 	expected_len = sizeof(*cp) + key_count *
2354 					sizeof(struct mgmt_link_key_info);
2355 	if (expected_len != len) {
2356 		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2357 		       expected_len, len);
2358 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2359 				  MGMT_STATUS_INVALID_PARAMS);
2360 	}
2361 
2362 	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2363 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2364 				  MGMT_STATUS_INVALID_PARAMS);
2365 
2366 	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2367 	       key_count);
2368 
2369 	for (i = 0; i < key_count; i++) {
2370 		struct mgmt_link_key_info *key = &cp->keys[i];
2371 
2372 		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2373 			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2374 					  MGMT_STATUS_INVALID_PARAMS);
2375 	}
2376 
2377 	hci_dev_lock(hdev);
2378 
2379 	hci_link_keys_clear(hdev);
2380 
2381 	if (cp->debug_keys)
2382 		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2383 	else
2384 		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2385 
2386 	if (changed)
2387 		new_settings(hdev, NULL);
2388 
2389 	for (i = 0; i < key_count; i++) {
2390 		struct mgmt_link_key_info *key = &cp->keys[i];
2391 
2392 		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2393 				 key->type, key->pin_len);
2394 	}
2395 
2396 	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2397 
2398 	hci_dev_unlock(hdev);
2399 
2400 	return 0;
2401 }
2402 
2403 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2404 			   u8 addr_type, struct sock *skip_sk)
2405 {
2406 	struct mgmt_ev_device_unpaired ev;
2407 
2408 	bacpy(&ev.addr.bdaddr, bdaddr);
2409 	ev.addr.type = addr_type;
2410 
2411 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2412 			  skip_sk);
2413 }
2414 
2415 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2416 			 u16 len)
2417 {
2418 	struct mgmt_cp_unpair_device *cp = data;
2419 	struct mgmt_rp_unpair_device rp;
2420 	struct hci_cp_disconnect dc;
2421 	struct pending_cmd *cmd;
2422 	struct hci_conn *conn;
2423 	int err;
2424 
2425 	memset(&rp, 0, sizeof(rp));
2426 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2427 	rp.addr.type = cp->addr.type;
2428 
2429 	if (!bdaddr_type_is_valid(cp->addr.type))
2430 		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2431 				    MGMT_STATUS_INVALID_PARAMS,
2432 				    &rp, sizeof(rp));
2433 
2434 	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2435 		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2436 				    MGMT_STATUS_INVALID_PARAMS,
2437 				    &rp, sizeof(rp));
2438 
2439 	hci_dev_lock(hdev);
2440 
2441 	if (!hdev_is_powered(hdev)) {
2442 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2443 				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2444 		goto unlock;
2445 	}
2446 
2447 	if (cp->addr.type == BDADDR_BREDR) {
2448 		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2449 	} else {
2450 		u8 addr_type;
2451 
2452 		if (cp->addr.type == BDADDR_LE_PUBLIC)
2453 			addr_type = ADDR_LE_DEV_PUBLIC;
2454 		else
2455 			addr_type = ADDR_LE_DEV_RANDOM;
2456 
2457 		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2458 
2459 		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2460 
2461 		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2462 	}
2463 
2464 	if (err < 0) {
2465 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2466 				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2467 		goto unlock;
2468 	}
2469 
2470 	if (cp->disconnect) {
2471 		if (cp->addr.type == BDADDR_BREDR)
2472 			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2473 						       &cp->addr.bdaddr);
2474 		else
2475 			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2476 						       &cp->addr.bdaddr);
2477 	} else {
2478 		conn = NULL;
2479 	}
2480 
2481 	if (!conn) {
2482 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2483 				   &rp, sizeof(rp));
2484 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2485 		goto unlock;
2486 	}
2487 
2488 	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2489 			       sizeof(*cp));
2490 	if (!cmd) {
2491 		err = -ENOMEM;
2492 		goto unlock;
2493 	}
2494 
2495 	dc.handle = cpu_to_le16(conn->handle);
2496 	dc.reason = 0x13; /* Remote User Terminated Connection */
2497 	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2498 	if (err < 0)
2499 		mgmt_pending_remove(cmd);
2500 
2501 unlock:
2502 	hci_dev_unlock(hdev);
2503 	return err;
2504 }
2505 
/* MGMT_OP_DISCONNECT handler: request disconnection of an existing
 * BR/EDR or LE connection. The command stays pending until the HCI
 * Disconnect transaction completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The response always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending per controller */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2570 
2571 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2572 {
2573 	switch (link_type) {
2574 	case LE_LINK:
2575 		switch (addr_type) {
2576 		case ADDR_LE_DEV_PUBLIC:
2577 			return BDADDR_LE_PUBLIC;
2578 
2579 		default:
2580 			/* Fallback to LE Random address type */
2581 			return BDADDR_LE_RANDOM;
2582 		}
2583 
2584 	default:
2585 		/* Fallback to BR/EDR type */
2586 		return BDADDR_BREDR;
2587 	}
2588 }
2589 
2590 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2591 			   u16 data_len)
2592 {
2593 	struct mgmt_rp_get_connections *rp;
2594 	struct hci_conn *c;
2595 	size_t rp_len;
2596 	int err;
2597 	u16 i;
2598 
2599 	BT_DBG("");
2600 
2601 	hci_dev_lock(hdev);
2602 
2603 	if (!hdev_is_powered(hdev)) {
2604 		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2605 				 MGMT_STATUS_NOT_POWERED);
2606 		goto unlock;
2607 	}
2608 
2609 	i = 0;
2610 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2611 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2612 			i++;
2613 	}
2614 
2615 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2616 	rp = kmalloc(rp_len, GFP_KERNEL);
2617 	if (!rp) {
2618 		err = -ENOMEM;
2619 		goto unlock;
2620 	}
2621 
2622 	i = 0;
2623 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2624 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2625 			continue;
2626 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2627 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2628 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2629 			continue;
2630 		i++;
2631 	}
2632 
2633 	rp->conn_count = cpu_to_le16(i);
2634 
2635 	/* Recalculate length in case of filtered SCO connections, etc */
2636 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2637 
2638 	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2639 			   rp_len);
2640 
2641 	kfree(rp);
2642 
2643 unlock:
2644 	hci_dev_unlock(hdev);
2645 	return err;
2646 }
2647 
/* Send an HCI PIN Code Negative Reply for cp->addr and track it as a
 * pending MGMT_OP_PIN_CODE_NEG_REPLY command. Returns a negative
 * errno on failure (the pending command is then removed again).
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2666 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user supplied PIN code to
 * the controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* A PIN reply only makes sense for an existing ACL connection */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN: reject the pairing
	 * towards the remote side and tell user space the parameters
	 * were invalid.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2726 
2727 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2728 			     u16 len)
2729 {
2730 	struct mgmt_cp_set_io_capability *cp = data;
2731 
2732 	BT_DBG("");
2733 
2734 	hci_dev_lock(hdev);
2735 
2736 	hdev->io_capability = cp->io_capability;
2737 
2738 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2739 	       hdev->io_capability);
2740 
2741 	hci_dev_unlock(hdev);
2742 
2743 	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2744 			    0);
2745 }
2746 
2747 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2748 {
2749 	struct hci_dev *hdev = conn->hdev;
2750 	struct pending_cmd *cmd;
2751 
2752 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2753 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2754 			continue;
2755 
2756 		if (cmd->user_data != conn)
2757 			continue;
2758 
2759 		return cmd;
2760 	}
2761 
2762 	return NULL;
2763 }
2764 
/* Finish a pending MGMT_OP_PAIR_DEVICE command: send the response,
 * detach the pairing callbacks from the connection, drop the
 * connection reference and free the pending command.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* cmd->user_data (conn) must not be dereferenced after this */
	mgmt_pending_remove(cmd);
}
2785 
2786 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2787 {
2788 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2789 	struct pending_cmd *cmd;
2790 
2791 	cmd = find_pairing(conn);
2792 	if (cmd)
2793 		pairing_complete(cmd, status);
2794 }
2795 
2796 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2797 {
2798 	struct pending_cmd *cmd;
2799 
2800 	BT_DBG("status %u", status);
2801 
2802 	cmd = find_pairing(conn);
2803 	if (!cmd)
2804 		BT_DBG("Unable to find a pending command");
2805 	else
2806 		pairing_complete(cmd, mgmt_status(status));
2807 }
2808 
2809 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2810 {
2811 	struct pending_cmd *cmd;
2812 
2813 	BT_DBG("status %u", status);
2814 
2815 	if (!status)
2816 		return;
2817 
2818 	cmd = find_pairing(conn);
2819 	if (!cmd)
2820 		BT_DBG("Unable to find a pending command");
2821 	else
2822 		pairing_complete(cmd, mgmt_status(status));
2823 }
2824 
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding with a
 * remote BR/EDR or LE device. The command stays pending (tracked via
 * cmd->user_data = conn) until one of the pairing callbacks or
 * mgmt_smp_complete() calls pairing_complete().
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The response always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means another pairing is in progress
	 * on this connection; release the reference we just took.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: complete immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2926 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the currently pending
 * Pair Device command for the given address. The pending pairing is
 * completed with MGMT_STATUS_CANCELLED before this command's own
 * response is sent.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The supplied address must match the pending pairing's target */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
2968 
/* Common worker for all user confirmation / passkey (negative) reply
 * commands. For LE connections the reply is routed to SMP and answered
 * immediately; for BR/EDR it is sent to the controller as hci_op and
 * tracked as a pending mgmt command until the HCI transaction ends.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3038 
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: reject an incoming PIN code
 * request; thin wrapper around user_pairing_resp().
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3050 
/* MGMT_OP_USER_CONFIRM_REPLY handler: accept a user confirmation
 * request; thin wrapper around user_pairing_resp().
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	/* This command has a fixed size, unlike some others that are
	 * dispatched with variable length allowed.
	 */
	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3066 
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: reject a user confirmation
 * request; thin wrapper around user_pairing_resp().
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3078 
/* MGMT_OP_USER_PASSKEY_REPLY handler: supply the passkey for an
 * ongoing pairing; thin wrapper around user_pairing_resp().
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3090 
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: reject a passkey request;
 * thin wrapper around user_pairing_resp().
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3102 
/* Append an HCI Write Local Name command carrying hdev->dev_name to
 * the given request.
 */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3112 
/* Completion callback for the HCI request started by set_local_name():
 * answer the pending MGMT_OP_SET_LOCAL_NAME command with either the
 * HCI error status or the name parameters that were set.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The command may already have been completed/cancelled */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3140 
/* MGMT_OP_SET_LOCAL_NAME handler: set the controller's local name and
 * short name. When powered, writes the name (and EIR / scan response
 * data) to the controller; completion is reported from
 * set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* When powered off just store the name and notify listeners;
	 * the controller is updated on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3209 
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: request the local out-of-band
 * pairing data from the controller (extended variant when Secure
 * Connections is enabled). The command stays pending until the HCI
 * response arrives.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data only exists on SSP capable controllers */
	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* With Secure Connections enabled the extended command returns
	 * both P-192 and P-256 values.
	 */
	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3257 
3258 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3259 			       void *data, u16 len)
3260 {
3261 	int err;
3262 
3263 	BT_DBG("%s ", hdev->name);
3264 
3265 	hci_dev_lock(hdev);
3266 
3267 	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3268 		struct mgmt_cp_add_remote_oob_data *cp = data;
3269 		u8 status;
3270 
3271 		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3272 					      cp->hash, cp->randomizer);
3273 		if (err < 0)
3274 			status = MGMT_STATUS_FAILED;
3275 		else
3276 			status = MGMT_STATUS_SUCCESS;
3277 
3278 		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3279 				   status, &cp->addr, sizeof(cp->addr));
3280 	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3281 		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3282 		u8 status;
3283 
3284 		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3285 						  cp->hash192,
3286 						  cp->randomizer192,
3287 						  cp->hash256,
3288 						  cp->randomizer256);
3289 		if (err < 0)
3290 			status = MGMT_STATUS_FAILED;
3291 		else
3292 			status = MGMT_STATUS_SUCCESS;
3293 
3294 		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3295 				   status, &cp->addr, sizeof(cp->addr));
3296 	} else {
3297 		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3298 		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3299 				 MGMT_STATUS_INVALID_PARAMS);
3300 	}
3301 
3302 	hci_dev_unlock(hdev);
3303 	return err;
3304 }
3305 
3306 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3307 				  void *data, u16 len)
3308 {
3309 	struct mgmt_cp_remove_remote_oob_data *cp = data;
3310 	u8 status;
3311 	int err;
3312 
3313 	BT_DBG("%s", hdev->name);
3314 
3315 	hci_dev_lock(hdev);
3316 
3317 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3318 	if (err < 0)
3319 		status = MGMT_STATUS_INVALID_PARAMS;
3320 	else
3321 		status = MGMT_STATUS_SUCCESS;
3322 
3323 	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3324 			   status, &cp->addr, sizeof(cp->addr));
3325 
3326 	hci_dev_unlock(hdev);
3327 	return err;
3328 }
3329 
3330 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3331 {
3332 	struct pending_cmd *cmd;
3333 	u8 type;
3334 	int err;
3335 
3336 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3337 
3338 	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3339 	if (!cmd)
3340 		return -ENOENT;
3341 
3342 	type = hdev->discovery.type;
3343 
3344 	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3345 			   &type, sizeof(type));
3346 	mgmt_pending_remove(cmd);
3347 
3348 	return err;
3349 }
3350 
3351 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3352 {
3353 	unsigned long timeout = 0;
3354 
3355 	BT_DBG("status %d", status);
3356 
3357 	if (status) {
3358 		hci_dev_lock(hdev);
3359 		mgmt_start_discovery_failed(hdev, status);
3360 		hci_dev_unlock(hdev);
3361 		return;
3362 	}
3363 
3364 	hci_dev_lock(hdev);
3365 	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3366 	hci_dev_unlock(hdev);
3367 
3368 	switch (hdev->discovery.type) {
3369 	case DISCOV_TYPE_LE:
3370 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3371 		break;
3372 
3373 	case DISCOV_TYPE_INTERLEAVED:
3374 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3375 		break;
3376 
3377 	case DISCOV_TYPE_BREDR:
3378 		break;
3379 
3380 	default:
3381 		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3382 	}
3383 
3384 	if (!timeout)
3385 		return;
3386 
3387 	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
3388 }
3389 
/* Handler for the MGMT_OP_START_DISCOVERY command.
 *
 * Validates the adapter state, records the requested discovery type and
 * builds a single HCI request that starts either a BR/EDR inquiry, an
 * LE active scan, or both (interleaved). The command is answered
 * asynchronously from start_discovery_complete() once the request has
 * run; this function only returns the synchronous submission result
 * (0 or a negative errno).
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Discovery requires a powered-on adapter. */
	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* A periodic inquiry owns the controller's inquiry logic. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Only one discovery procedure may be active at a time. */
	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* An inquiry already running outside of mgmt control
		 * (e.g. via raw HCI) blocks a new one.
		 */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery additionally needs BR/EDR. */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Scanning while advertising is refused here; presumably
		 * the controller cannot do both at once — confirm against
		 * the rest of the file's advertising handling.
		 */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* The pending cmd is answered from start_discovery_complete(). */
	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3538 
3539 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3540 {
3541 	struct pending_cmd *cmd;
3542 	int err;
3543 
3544 	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3545 	if (!cmd)
3546 		return -ENOENT;
3547 
3548 	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3549 			   &hdev->discovery.type, sizeof(hdev->discovery.type));
3550 	mgmt_pending_remove(cmd);
3551 
3552 	return err;
3553 }
3554 
3555 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3556 {
3557 	BT_DBG("status %d", status);
3558 
3559 	hci_dev_lock(hdev);
3560 
3561 	if (status) {
3562 		mgmt_stop_discovery_failed(hdev, status);
3563 		goto unlock;
3564 	}
3565 
3566 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3567 
3568 unlock:
3569 	hci_dev_unlock(hdev);
3570 }
3571 
/* Handler for the MGMT_OP_STOP_DISCOVERY command.
 *
 * Depending on the current discovery state this either cancels the
 * BR/EDR inquiry, disables the LE scan, or cancels an outstanding
 * remote name request. The final status is delivered asynchronously
 * from stop_discovery_complete() unless an error is returned here
 * synchronously.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Nothing to stop if no discovery is running. */
	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the running discovery. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* HCI_INQUIRY set means a BR/EDR inquiry is in progress;
		 * otherwise the active scan must be an LE scan, whose
		 * pending auto-disable work is no longer needed.
		 */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);

			hci_req_add_le_scan_disable(&req);
		}

		break;

	case DISCOVERY_RESOLVING:
		/* Name resolution phase: cancel the outstanding remote
		 * name request, if any entry is still pending.
		 */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e) {
			/* No request in flight — discovery can be
			 * declared stopped right away.
			 */
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The pending cmd is answered from stop_discovery_complete(). */
	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3659 
3660 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3661 			u16 len)
3662 {
3663 	struct mgmt_cp_confirm_name *cp = data;
3664 	struct inquiry_entry *e;
3665 	int err;
3666 
3667 	BT_DBG("%s", hdev->name);
3668 
3669 	hci_dev_lock(hdev);
3670 
3671 	if (!hci_discovery_active(hdev)) {
3672 		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3673 				   MGMT_STATUS_FAILED, &cp->addr,
3674 				   sizeof(cp->addr));
3675 		goto failed;
3676 	}
3677 
3678 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3679 	if (!e) {
3680 		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3681 				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3682 				   sizeof(cp->addr));
3683 		goto failed;
3684 	}
3685 
3686 	if (cp->name_known) {
3687 		e->name_state = NAME_KNOWN;
3688 		list_del(&e->list);
3689 	} else {
3690 		e->name_state = NAME_NEEDED;
3691 		hci_inquiry_cache_update_resolve(hdev, e);
3692 	}
3693 
3694 	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3695 			   sizeof(cp->addr));
3696 
3697 failed:
3698 	hci_dev_unlock(hdev);
3699 	return err;
3700 }
3701 
3702 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3703 			u16 len)
3704 {
3705 	struct mgmt_cp_block_device *cp = data;
3706 	u8 status;
3707 	int err;
3708 
3709 	BT_DBG("%s", hdev->name);
3710 
3711 	if (!bdaddr_type_is_valid(cp->addr.type))
3712 		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3713 				    MGMT_STATUS_INVALID_PARAMS,
3714 				    &cp->addr, sizeof(cp->addr));
3715 
3716 	hci_dev_lock(hdev);
3717 
3718 	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3719 	if (err < 0)
3720 		status = MGMT_STATUS_FAILED;
3721 	else
3722 		status = MGMT_STATUS_SUCCESS;
3723 
3724 	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3725 			   &cp->addr, sizeof(cp->addr));
3726 
3727 	hci_dev_unlock(hdev);
3728 
3729 	return err;
3730 }
3731 
3732 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3733 			  u16 len)
3734 {
3735 	struct mgmt_cp_unblock_device *cp = data;
3736 	u8 status;
3737 	int err;
3738 
3739 	BT_DBG("%s", hdev->name);
3740 
3741 	if (!bdaddr_type_is_valid(cp->addr.type))
3742 		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3743 				    MGMT_STATUS_INVALID_PARAMS,
3744 				    &cp->addr, sizeof(cp->addr));
3745 
3746 	hci_dev_lock(hdev);
3747 
3748 	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3749 	if (err < 0)
3750 		status = MGMT_STATUS_INVALID_PARAMS;
3751 	else
3752 		status = MGMT_STATUS_SUCCESS;
3753 
3754 	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3755 			   &cp->addr, sizeof(cp->addr));
3756 
3757 	hci_dev_unlock(hdev);
3758 
3759 	return err;
3760 }
3761 
3762 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3763 			 u16 len)
3764 {
3765 	struct mgmt_cp_set_device_id *cp = data;
3766 	struct hci_request req;
3767 	int err;
3768 	__u16 source;
3769 
3770 	BT_DBG("%s", hdev->name);
3771 
3772 	source = __le16_to_cpu(cp->source);
3773 
3774 	if (source > 0x0002)
3775 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3776 				  MGMT_STATUS_INVALID_PARAMS);
3777 
3778 	hci_dev_lock(hdev);
3779 
3780 	hdev->devid_source = source;
3781 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3782 	hdev->devid_product = __le16_to_cpu(cp->product);
3783 	hdev->devid_version = __le16_to_cpu(cp->version);
3784 
3785 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3786 
3787 	hci_req_init(&req, hdev);
3788 	update_eir(&req);
3789 	hci_req_run(&req, NULL);
3790 
3791 	hci_dev_unlock(hdev);
3792 
3793 	return err;
3794 }
3795 
3796 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3797 {
3798 	struct cmd_lookup match = { NULL, hdev };
3799 
3800 	if (status) {
3801 		u8 mgmt_err = mgmt_status(status);
3802 
3803 		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3804 				     cmd_status_rsp, &mgmt_err);
3805 		return;
3806 	}
3807 
3808 	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3809 			     &match);
3810 
3811 	new_settings(hdev, match.sk);
3812 
3813 	if (match.sk)
3814 		sock_put(match.sk);
3815 }
3816 
/* Handler for the MGMT_OP_SET_ADVERTISING command.
 *
 * Toggles LE advertising. When no HCI interaction is required (adapter
 * off, value unchanged, or LE connections exist) the flag is flipped
 * directly and the response sent synchronously; otherwise an HCI
 * request is issued and answered from set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	/* Only 0x00 (off) and 0x01 (on) are valid values. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if the flag changed. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* A pending Set Advertising or Set LE operation could race with
	 * this one, so refuse with a busy status.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3894 
3895 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3896 			      void *data, u16 len)
3897 {
3898 	struct mgmt_cp_set_static_address *cp = data;
3899 	int err;
3900 
3901 	BT_DBG("%s", hdev->name);
3902 
3903 	if (!lmp_le_capable(hdev))
3904 		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3905 				  MGMT_STATUS_NOT_SUPPORTED);
3906 
3907 	if (hdev_is_powered(hdev))
3908 		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3909 				  MGMT_STATUS_REJECTED);
3910 
3911 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3912 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3913 			return cmd_status(sk, hdev->id,
3914 					  MGMT_OP_SET_STATIC_ADDRESS,
3915 					  MGMT_STATUS_INVALID_PARAMS);
3916 
3917 		/* Two most significant bits shall be set */
3918 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3919 			return cmd_status(sk, hdev->id,
3920 					  MGMT_OP_SET_STATIC_ADDRESS,
3921 					  MGMT_STATUS_INVALID_PARAMS);
3922 	}
3923 
3924 	hci_dev_lock(hdev);
3925 
3926 	bacpy(&hdev->static_addr, &cp->bdaddr);
3927 
3928 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3929 
3930 	hci_dev_unlock(hdev);
3931 
3932 	return err;
3933 }
3934 
3935 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3936 			   void *data, u16 len)
3937 {
3938 	struct mgmt_cp_set_scan_params *cp = data;
3939 	__u16 interval, window;
3940 	int err;
3941 
3942 	BT_DBG("%s", hdev->name);
3943 
3944 	if (!lmp_le_capable(hdev))
3945 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3946 				  MGMT_STATUS_NOT_SUPPORTED);
3947 
3948 	interval = __le16_to_cpu(cp->interval);
3949 
3950 	if (interval < 0x0004 || interval > 0x4000)
3951 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3952 				  MGMT_STATUS_INVALID_PARAMS);
3953 
3954 	window = __le16_to_cpu(cp->window);
3955 
3956 	if (window < 0x0004 || window > 0x4000)
3957 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3958 				  MGMT_STATUS_INVALID_PARAMS);
3959 
3960 	if (window > interval)
3961 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3962 				  MGMT_STATUS_INVALID_PARAMS);
3963 
3964 	hci_dev_lock(hdev);
3965 
3966 	hdev->le_scan_interval = interval;
3967 	hdev->le_scan_window = window;
3968 
3969 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3970 
3971 	/* If background scan is running, restart it so new parameters are
3972 	 * loaded.
3973 	 */
3974 	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
3975 	    hdev->discovery.state == DISCOVERY_STOPPED) {
3976 		struct hci_request req;
3977 
3978 		hci_req_init(&req, hdev);
3979 
3980 		hci_req_add_le_scan_disable(&req);
3981 		hci_req_add_le_passive_scan(&req);
3982 
3983 		hci_req_run(&req, NULL);
3984 	}
3985 
3986 	hci_dev_unlock(hdev);
3987 
3988 	return err;
3989 }
3990 
3991 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3992 {
3993 	struct pending_cmd *cmd;
3994 
3995 	BT_DBG("status 0x%02x", status);
3996 
3997 	hci_dev_lock(hdev);
3998 
3999 	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4000 	if (!cmd)
4001 		goto unlock;
4002 
4003 	if (status) {
4004 		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4005 			   mgmt_status(status));
4006 	} else {
4007 		struct mgmt_mode *cp = cmd->param;
4008 
4009 		if (cp->val)
4010 			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4011 		else
4012 			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4013 
4014 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4015 		new_settings(hdev, cmd->sk);
4016 	}
4017 
4018 	mgmt_pending_remove(cmd);
4019 
4020 unlock:
4021 	hci_dev_unlock(hdev);
4022 }
4023 
/* Handler for the MGMT_OP_SET_FAST_CONNECTABLE command.
 *
 * Adjusts the BR/EDR page scan parameters for faster connection setup.
 * Requires a powered, connectable, BR/EDR-enabled adapter (version 1.2
 * or later). The final result is delivered asynchronously from
 * fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Only 0x00 (off) and 0x01 (on) are valid values. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable may be pending at a time. */
	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Requested value already in effect: answer without HCI work. */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4088 
4089 static void set_bredr_scan(struct hci_request *req)
4090 {
4091 	struct hci_dev *hdev = req->hdev;
4092 	u8 scan = 0;
4093 
4094 	/* Ensure that fast connectable is disabled. This function will
4095 	 * not do anything if the page scan parameters are already what
4096 	 * they should be.
4097 	 */
4098 	write_fast_connectable(req, false);
4099 
4100 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4101 		scan |= SCAN_PAGE;
4102 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4103 		scan |= SCAN_INQUIRY;
4104 
4105 	if (scan)
4106 		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4107 }
4108 
4109 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4110 {
4111 	struct pending_cmd *cmd;
4112 
4113 	BT_DBG("status 0x%02x", status);
4114 
4115 	hci_dev_lock(hdev);
4116 
4117 	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4118 	if (!cmd)
4119 		goto unlock;
4120 
4121 	if (status) {
4122 		u8 mgmt_err = mgmt_status(status);
4123 
4124 		/* We need to restore the flag if related HCI commands
4125 		 * failed.
4126 		 */
4127 		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4128 
4129 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4130 	} else {
4131 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4132 		new_settings(hdev, cmd->sk);
4133 	}
4134 
4135 	mgmt_pending_remove(cmd);
4136 
4137 unlock:
4138 	hci_dev_unlock(hdev);
4139 }
4140 
/* Handler for the MGMT_OP_SET_BREDR command.
 *
 * Enables or disables BR/EDR support on a dual-mode (BR/EDR + LE)
 * controller. Disabling is only possible while powered off; while
 * powered on, only enabling is permitted and is completed
 * asynchronously via set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Toggling BR/EDR only makes sense on a dual-mode controller. */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled, so BR/EDR can only be toggled when LE
	 * is currently on.
	 */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	/* Only 0x00 (off) and 0x01 (on) are valid values. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Requested value already in effect: answer immediately. */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* When turning BR/EDR off, the BR/EDR-only settings are
		 * cleared as well since they no longer apply.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	/* On failure, set_bredr_complete() restores the flag. */
	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4230 
/* Handler for the MGMT_OP_SET_SECURE_CONN command.
 *
 * Values: 0x00 disables Secure Connections, 0x01 enables it, and 0x02
 * enables SC-only mode. When powered off only the flags are updated;
 * when powered on the controller's SC support is written via
 * HCI_OP_WRITE_SC_SUPPORT.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* Controller must support SC, unless the force flag (set via
	 * debugfs, presumably for testing — confirm) overrides that.
	 */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		/* Adapter is off: just update the flags and respond. */
		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Both the enabled state and the SC-only mode already match the
	 * request: answer without touching the controller.
	 */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* SC-only mode is a host-side policy, updated immediately. */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4318 
4319 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4320 			  void *data, u16 len)
4321 {
4322 	struct mgmt_mode *cp = data;
4323 	bool changed;
4324 	int err;
4325 
4326 	BT_DBG("request for %s", hdev->name);
4327 
4328 	if (cp->val != 0x00 && cp->val != 0x01)
4329 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4330 				  MGMT_STATUS_INVALID_PARAMS);
4331 
4332 	hci_dev_lock(hdev);
4333 
4334 	if (cp->val)
4335 		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4336 	else
4337 		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4338 
4339 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4340 	if (err < 0)
4341 		goto unlock;
4342 
4343 	if (changed)
4344 		err = new_settings(hdev, sk);
4345 
4346 unlock:
4347 	hci_dev_unlock(hdev);
4348 	return err;
4349 }
4350 
/* Handler for the MGMT_OP_SET_PRIVACY command.
 *
 * Enables or disables LE privacy (use of resolvable private addresses)
 * and stores the local IRK supplied by userspace. Only allowed while
 * the adapter is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Only 0x00 (off) and 0x01 (on) are valid privacy values. */
	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	/* Privacy can only be reconfigured while powered off. */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark the current RPA stale so a new one is generated. */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4400 
4401 static bool irk_is_valid(struct mgmt_irk_info *irk)
4402 {
4403 	switch (irk->addr.type) {
4404 	case BDADDR_LE_PUBLIC:
4405 		return true;
4406 
4407 	case BDADDR_LE_RANDOM:
4408 		/* Two most significant bits shall be set */
4409 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4410 			return false;
4411 		return true;
4412 	}
4413 
4414 	return false;
4415 }
4416 
4417 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4418 		     u16 len)
4419 {
4420 	struct mgmt_cp_load_irks *cp = cp_data;
4421 	u16 irk_count, expected_len;
4422 	int i, err;
4423 
4424 	BT_DBG("request for %s", hdev->name);
4425 
4426 	if (!lmp_le_capable(hdev))
4427 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4428 				  MGMT_STATUS_NOT_SUPPORTED);
4429 
4430 	irk_count = __le16_to_cpu(cp->irk_count);
4431 
4432 	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4433 	if (expected_len != len) {
4434 		BT_ERR("load_irks: expected %u bytes, got %u bytes",
4435 		       expected_len, len);
4436 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4437 				  MGMT_STATUS_INVALID_PARAMS);
4438 	}
4439 
4440 	BT_DBG("%s irk_count %u", hdev->name, irk_count);
4441 
4442 	for (i = 0; i < irk_count; i++) {
4443 		struct mgmt_irk_info *key = &cp->irks[i];
4444 
4445 		if (!irk_is_valid(key))
4446 			return cmd_status(sk, hdev->id,
4447 					  MGMT_OP_LOAD_IRKS,
4448 					  MGMT_STATUS_INVALID_PARAMS);
4449 	}
4450 
4451 	hci_dev_lock(hdev);
4452 
4453 	hci_smp_irks_clear(hdev);
4454 
4455 	for (i = 0; i < irk_count; i++) {
4456 		struct mgmt_irk_info *irk = &cp->irks[i];
4457 		u8 addr_type;
4458 
4459 		if (irk->addr.type == BDADDR_LE_PUBLIC)
4460 			addr_type = ADDR_LE_DEV_PUBLIC;
4461 		else
4462 			addr_type = ADDR_LE_DEV_RANDOM;
4463 
4464 		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4465 			    BDADDR_ANY);
4466 	}
4467 
4468 	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4469 
4470 	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4471 
4472 	hci_dev_unlock(hdev);
4473 
4474 	return err;
4475 }
4476 
4477 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4478 {
4479 	if (key->master != 0x00 && key->master != 0x01)
4480 		return false;
4481 
4482 	switch (key->addr.type) {
4483 	case BDADDR_LE_PUBLIC:
4484 		return true;
4485 
4486 	case BDADDR_LE_RANDOM:
4487 		/* Two most significant bits shall be set */
4488 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4489 			return false;
4490 		return true;
4491 	}
4492 
4493 	return false;
4494 }
4495 
4496 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4497 			       void *cp_data, u16 len)
4498 {
4499 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
4500 	u16 key_count, expected_len;
4501 	int i, err;
4502 
4503 	BT_DBG("request for %s", hdev->name);
4504 
4505 	if (!lmp_le_capable(hdev))
4506 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4507 				  MGMT_STATUS_NOT_SUPPORTED);
4508 
4509 	key_count = __le16_to_cpu(cp->key_count);
4510 
4511 	expected_len = sizeof(*cp) + key_count *
4512 					sizeof(struct mgmt_ltk_info);
4513 	if (expected_len != len) {
4514 		BT_ERR("load_keys: expected %u bytes, got %u bytes",
4515 		       expected_len, len);
4516 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4517 				  MGMT_STATUS_INVALID_PARAMS);
4518 	}
4519 
4520 	BT_DBG("%s key_count %u", hdev->name, key_count);
4521 
4522 	for (i = 0; i < key_count; i++) {
4523 		struct mgmt_ltk_info *key = &cp->keys[i];
4524 
4525 		if (!ltk_is_valid(key))
4526 			return cmd_status(sk, hdev->id,
4527 					  MGMT_OP_LOAD_LONG_TERM_KEYS,
4528 					  MGMT_STATUS_INVALID_PARAMS);
4529 	}
4530 
4531 	hci_dev_lock(hdev);
4532 
4533 	hci_smp_ltks_clear(hdev);
4534 
4535 	for (i = 0; i < key_count; i++) {
4536 		struct mgmt_ltk_info *key = &cp->keys[i];
4537 		u8 type, addr_type, authenticated;
4538 
4539 		if (key->addr.type == BDADDR_LE_PUBLIC)
4540 			addr_type = ADDR_LE_DEV_PUBLIC;
4541 		else
4542 			addr_type = ADDR_LE_DEV_RANDOM;
4543 
4544 		if (key->master)
4545 			type = HCI_SMP_LTK;
4546 		else
4547 			type = HCI_SMP_LTK_SLAVE;
4548 
4549 		switch (key->type) {
4550 		case MGMT_LTK_UNAUTHENTICATED:
4551 			authenticated = 0x00;
4552 			break;
4553 		case MGMT_LTK_AUTHENTICATED:
4554 			authenticated = 0x01;
4555 			break;
4556 		default:
4557 			continue;
4558 		}
4559 
4560 		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4561 			    authenticated, key->val, key->enc_size, key->ediv,
4562 			    key->rand);
4563 	}
4564 
4565 	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4566 			   NULL, 0);
4567 
4568 	hci_dev_unlock(hdev);
4569 
4570 	return err;
4571 }
4572 
/* Context handed to get_conn_info_complete() when iterating over all
 * pending Get Connection Information commands for a controller.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;	/* only complete commands for this link */
	bool valid_tx_power;	/* cached TX power values may be reported */
	u8 mgmt_status;		/* status to report back to userspace */
};
4578 
/* mgmt_pending_foreach() callback, run once a connection-info refresh
 * request has finished: complete the pending Get Connection Information
 * command, but only when it refers to the connection in the lookup data
 * (stored in cmd->user_data when the command was queued).
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	/* Skip commands that refer to some other connection */
	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	/* Echo the address from the original request back to userspace */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		/* TX power values are only reported when the refresh
		 * request completed successfully (see
		 * conn_info_refresh_complete()).
		 */
		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	/* Drop the reference taken in get_conn_info() when queueing */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4614 
/* HCI request completion callback for the Read RSSI / Read TX Power
 * request built in get_conn_info(): recover the connection handle from
 * the last sent command and complete all matching pending Get
 * Connection Information commands.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4672 
/* Get Connection Information command handler. Replies immediately from
 * the values cached in the hci_conn when they are recent enough;
 * otherwise issues Read RSSI (and, when needed, Read Transmit Power
 * Level) commands and defers the reply to conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* The reply always echoes the requested address, even on error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		/* Read RSSI is always first in the request; the completion
		 * callback relies on this ordering.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the deferred reply in
		 * get_conn_info_complete(), which drops this reference.
		 */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4784 
/* Dispatch table for mgmt commands, indexed by opcode. Each entry gives
 * the handler function plus the expected parameter length. For var_len
 * entries data_len is a minimum (the remainder is a variable-sized
 * trailer such as a key list); fixed-length entries require an exact
 * match. Both are enforced in mgmt_control() before dispatch.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
};
4842 
4843 
/* Entry point for mgmt commands arriving on an HCI control socket:
 * copy the message from userspace, validate header, index, opcode and
 * parameter length, then dispatch to the handler table.
 *
 * Returns the number of consumed bytes on success or a negative errno.
 * Note that mgmt-level failures are reported back via cmd_status() and
 * still count as success from the socket layer's point of view.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must account for exactly the rest of
	 * the message.
	 */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup or claimed by a user channel
		 * are not visible through the mgmt interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Opcodes below READ_INFO are global and must not carry an index;
	 * everything from READ_INFO up requires a controller.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* var_len commands need at least data_len bytes, fixed-length
	 * commands exactly data_len bytes.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
4936 
4937 void mgmt_index_added(struct hci_dev *hdev)
4938 {
4939 	if (hdev->dev_type != HCI_BREDR)
4940 		return;
4941 
4942 	mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4943 }
4944 
4945 void mgmt_index_removed(struct hci_dev *hdev)
4946 {
4947 	u8 status = MGMT_STATUS_INVALID_INDEX;
4948 
4949 	if (hdev->dev_type != HCI_BREDR)
4950 		return;
4951 
4952 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4953 
4954 	mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4955 }
4956 
4957 /* This function requires the caller holds hdev->lock */
4958 static void restart_le_auto_conns(struct hci_dev *hdev)
4959 {
4960 	struct hci_conn_params *p;
4961 
4962 	list_for_each_entry(p, &hdev->le_conn_params, list) {
4963 		if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
4964 			hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
4965 	}
4966 }
4967 
/* hci_request completion callback for powered_update_hci(): restart
 * background LE connections, answer all pending Set Powered commands
 * and emit New Settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_auto_conns(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* Release the socket reference left in match.sk (if any) */
	if (match.sk)
		sock_put(match.sk);
}
4987 
/* Build and run one HCI request that brings the controller in line with
 * the current mgmt settings after power on: SSP mode, LE host support,
 * advertising data, link-level authentication, scan mode, device class,
 * local name and EIR. Returns the hci_req_run() result (non-zero when
 * no commands were queued); powered_complete() runs on completion.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP in the controller only if requested and not yet set */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the controller's authentication setting with mgmt state */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5047 
/* Called on controller power state changes. On power up, first try to
 * sync controller state via powered_update_hci(); when that queues an
 * HCI request (returns 0), pending commands are answered later from
 * powered_complete(). On power down (or when no request was needed),
 * answer pending commands directly and emit New Settings.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	/* Powering off: fail every other pending command */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* Announce a zeroed class of device if one had been set */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	/* Release the socket reference left in match.sk (if any) */
	if (match.sk)
		sock_put(match.sk);

	return err;
}
5082 
5083 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5084 {
5085 	struct pending_cmd *cmd;
5086 	u8 status;
5087 
5088 	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5089 	if (!cmd)
5090 		return;
5091 
5092 	if (err == -ERFKILL)
5093 		status = MGMT_STATUS_RFKILLED;
5094 	else
5095 		status = MGMT_STATUS_FAILED;
5096 
5097 	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5098 
5099 	mgmt_pending_remove(cmd);
5100 }
5101 
/* Handler for the discoverable timeout: clear the discoverable flags,
 * restore page-scan-only mode on BR/EDR, refresh class and advertising
 * data, and notify userspace via New Settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
5132 
5133 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
5134 {
5135 	bool changed;
5136 
5137 	/* Nothing needed here if there's a pending command since that
5138 	 * commands request completion callback takes care of everything
5139 	 * necessary.
5140 	 */
5141 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
5142 		return;
5143 
5144 	/* Powering off may clear the scan mode - don't let that interfere */
5145 	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5146 		return;
5147 
5148 	if (discoverable) {
5149 		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5150 	} else {
5151 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5152 		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5153 	}
5154 
5155 	if (changed) {
5156 		struct hci_request req;
5157 
5158 		/* In case this change in discoverable was triggered by
5159 		 * a disabling of connectable there could be a need to
5160 		 * update the advertising flags.
5161 		 */
5162 		hci_req_init(&req, hdev);
5163 		update_adv_data(&req);
5164 		hci_req_run(&req, NULL);
5165 
5166 		new_settings(hdev, NULL);
5167 	}
5168 }
5169 
5170 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5171 {
5172 	bool changed;
5173 
5174 	/* Nothing needed here if there's a pending command since that
5175 	 * commands request completion callback takes care of everything
5176 	 * necessary.
5177 	 */
5178 	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5179 		return;
5180 
5181 	/* Powering off may clear the scan mode - don't let that interfere */
5182 	if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5183 		return;
5184 
5185 	if (connectable)
5186 		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5187 	else
5188 		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5189 
5190 	if (changed)
5191 		new_settings(hdev, NULL);
5192 }
5193 
5194 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5195 {
5196 	/* Powering off may stop advertising - don't let that interfere */
5197 	if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5198 		return;
5199 
5200 	if (advertising)
5201 		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5202 	else
5203 		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5204 }
5205 
5206 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5207 {
5208 	u8 mgmt_err = mgmt_status(status);
5209 
5210 	if (scan & SCAN_PAGE)
5211 		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5212 				     cmd_status_rsp, &mgmt_err);
5213 
5214 	if (scan & SCAN_INQUIRY)
5215 		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5216 				     cmd_status_rsp, &mgmt_err);
5217 }
5218 
5219 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5220 		       bool persistent)
5221 {
5222 	struct mgmt_ev_new_link_key ev;
5223 
5224 	memset(&ev, 0, sizeof(ev));
5225 
5226 	ev.store_hint = persistent;
5227 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5228 	ev.key.addr.type = BDADDR_BREDR;
5229 	ev.key.type = key->type;
5230 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5231 	ev.key.pin_len = key->pin_len;
5232 
5233 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
5234 }
5235 
5236 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5237 {
5238 	if (ltk->authenticated)
5239 		return MGMT_LTK_AUTHENTICATED;
5240 
5241 	return MGMT_LTK_UNAUTHENTICATED;
5242 }
5243 
5244 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5245 {
5246 	struct mgmt_ev_new_long_term_key ev;
5247 
5248 	memset(&ev, 0, sizeof(ev));
5249 
5250 	/* Devices using resolvable or non-resolvable random addresses
5251 	 * without providing an indentity resolving key don't require
5252 	 * to store long term keys. Their addresses will change the
5253 	 * next time around.
5254 	 *
5255 	 * Only when a remote device provides an identity address
5256 	 * make sure the long term key is stored. If the remote
5257 	 * identity is known, the long term keys are internally
5258 	 * mapped to the identity address. So allow static random
5259 	 * and public addresses here.
5260 	 */
5261 	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5262 	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
5263 		ev.store_hint = 0x00;
5264 	else
5265 		ev.store_hint = persistent;
5266 
5267 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5268 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
5269 	ev.key.type = mgmt_ltk_type(key);
5270 	ev.key.enc_size = key->enc_size;
5271 	ev.key.ediv = key->ediv;
5272 	ev.key.rand = key->rand;
5273 
5274 	if (key->type == HCI_SMP_LTK)
5275 		ev.key.master = 1;
5276 
5277 	memcpy(ev.key.val, key->val, sizeof(key->val));
5278 
5279 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
5280 }
5281 
5282 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
5283 {
5284 	struct mgmt_ev_new_irk ev;
5285 
5286 	memset(&ev, 0, sizeof(ev));
5287 
5288 	/* For identity resolving keys from devices that are already
5289 	 * using a public address or static random address, do not
5290 	 * ask for storing this key. The identity resolving key really
5291 	 * is only mandatory for devices using resovlable random
5292 	 * addresses.
5293 	 *
5294 	 * Storing all identity resolving keys has the downside that
5295 	 * they will be also loaded on next boot of they system. More
5296 	 * identity resolving keys, means more time during scanning is
5297 	 * needed to actually resolve these addresses.
5298 	 */
5299 	if (bacmp(&irk->rpa, BDADDR_ANY))
5300 		ev.store_hint = 0x01;
5301 	else
5302 		ev.store_hint = 0x00;
5303 
5304 	bacpy(&ev.rpa, &irk->rpa);
5305 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
5306 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
5307 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
5308 
5309 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
5310 }
5311 
5312 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
5313 		   bool persistent)
5314 {
5315 	struct mgmt_ev_new_csrk ev;
5316 
5317 	memset(&ev, 0, sizeof(ev));
5318 
5319 	/* Devices using resolvable or non-resolvable random addresses
5320 	 * without providing an indentity resolving key don't require
5321 	 * to store signature resolving keys. Their addresses will change
5322 	 * the next time around.
5323 	 *
5324 	 * Only when a remote device provides an identity address
5325 	 * make sure the signature resolving key is stored. So allow
5326 	 * static random and public addresses here.
5327 	 */
5328 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5329 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
5330 		ev.store_hint = 0x00;
5331 	else
5332 		ev.store_hint = persistent;
5333 
5334 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
5335 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
5336 	ev.key.master = csrk->master;
5337 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
5338 
5339 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
5340 }
5341 
5342 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5343 				  u8 data_len)
5344 {
5345 	eir[eir_len++] = sizeof(type) + data_len;
5346 	eir[eir_len++] = type;
5347 	memcpy(&eir[eir_len], data, data_len);
5348 	eir_len += data_len;
5349 
5350 	return eir_len;
5351 }
5352 
/* Emit a Device Connected event, appending the remote name and class of
 * device (when available) as EIR-formatted data.
 *
 * NOTE(review): the event plus EIR data is assembled in a fixed 512
 * byte buffer with no explicit bounds check here; this relies on
 * callers passing HCI-limited name lengths — confirm name_len is
 * bounded (e.g. <= HCI_MAX_NAME_LENGTH) at all call sites.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only append the class of device when it is non-zero */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
5379 
/* mgmt_pending_foreach() callback: complete a pending Disconnect
 * command with success and pass its socket (with an extra reference)
 * back to the caller through *data — presumably so the subsequent
 * Device Disconnected event is skipped for that socket via
 * mgmt_event()'s skip argument.
 */
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	/* The caller is responsible for the sock_put() */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
5397 
/* mgmt_pending_foreach() callback: emit Device Unpaired and complete a
 * pending Unpair Device command with success.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5414 
/* Handle a completed disconnection: kick the queued power-off work when
 * this was the last connection of a pending Set Powered (off) command,
 * answer pending Disconnect commands and emit Device Disconnected for
 * ACL/LE links.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	/* Only announce connections that were reported to userspace */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the responded socket in sk so the event
	 * below is not also sent to it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
5456 
/* Called when an HCI disconnect attempt failed: complete pending Unpair
 * Device commands, then finish a pending Disconnect command matching
 * this address with the translated status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command if it was for this exact address */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5488 
/* Announce a failed connection attempt to userspace.
 *
 * The power-off handling below duplicates the snippet in
 * mgmt_device_disconnected(): when a Set Powered (off) command is
 * pending and this was the last tracked connection, the queued
 * power-off work is run immediately.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
5514 
5515 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5516 {
5517 	struct mgmt_ev_pin_code_request ev;
5518 
5519 	bacpy(&ev.addr.bdaddr, bdaddr);
5520 	ev.addr.type = BDADDR_BREDR;
5521 	ev.secure = secure;
5522 
5523 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5524 }
5525 
5526 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5527 				  u8 status)
5528 {
5529 	struct pending_cmd *cmd;
5530 	struct mgmt_rp_pin_code_reply rp;
5531 
5532 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5533 	if (!cmd)
5534 		return;
5535 
5536 	bacpy(&rp.addr.bdaddr, bdaddr);
5537 	rp.addr.type = BDADDR_BREDR;
5538 
5539 	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5540 		     mgmt_status(status), &rp, sizeof(rp));
5541 
5542 	mgmt_pending_remove(cmd);
5543 }
5544 
5545 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5546 				      u8 status)
5547 {
5548 	struct pending_cmd *cmd;
5549 	struct mgmt_rp_pin_code_reply rp;
5550 
5551 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5552 	if (!cmd)
5553 		return;
5554 
5555 	bacpy(&rp.addr.bdaddr, bdaddr);
5556 	rp.addr.type = BDADDR_BREDR;
5557 
5558 	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5559 		     mgmt_status(status), &rp, sizeof(rp));
5560 
5561 	mgmt_pending_remove(cmd);
5562 }
5563 
5564 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5565 			      u8 link_type, u8 addr_type, u32 value,
5566 			      u8 confirm_hint)
5567 {
5568 	struct mgmt_ev_user_confirm_request ev;
5569 
5570 	BT_DBG("%s", hdev->name);
5571 
5572 	bacpy(&ev.addr.bdaddr, bdaddr);
5573 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5574 	ev.confirm_hint = confirm_hint;
5575 	ev.value = cpu_to_le32(value);
5576 
5577 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5578 			  NULL);
5579 }
5580 
5581 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5582 			      u8 link_type, u8 addr_type)
5583 {
5584 	struct mgmt_ev_user_passkey_request ev;
5585 
5586 	BT_DBG("%s", hdev->name);
5587 
5588 	bacpy(&ev.addr.bdaddr, bdaddr);
5589 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5590 
5591 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5592 			  NULL);
5593 }
5594 
/* Generic completion handler for the four user-pairing response mgmt
 * commands (confirm/passkey, positive/negative reply). Looks up the
 * pending command for @opcode, completes it towards its socket with the
 * translated controller @status and the address the reply referred to,
 * then drops the pending entry.
 *
 * Returns -ENOENT if no matching command is pending, otherwise the
 * result of cmd_complete().
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}
5616 
5617 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5618 				     u8 link_type, u8 addr_type, u8 status)
5619 {
5620 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5621 					  status, MGMT_OP_USER_CONFIRM_REPLY);
5622 }
5623 
5624 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5625 					 u8 link_type, u8 addr_type, u8 status)
5626 {
5627 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5628 					  status,
5629 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
5630 }
5631 
5632 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5633 				     u8 link_type, u8 addr_type, u8 status)
5634 {
5635 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5636 					  status, MGMT_OP_USER_PASSKEY_REPLY);
5637 }
5638 
5639 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5640 					 u8 link_type, u8 addr_type, u8 status)
5641 {
5642 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5643 					  status,
5644 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
5645 }
5646 
5647 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5648 			     u8 link_type, u8 addr_type, u32 passkey,
5649 			     u8 entered)
5650 {
5651 	struct mgmt_ev_passkey_notify ev;
5652 
5653 	BT_DBG("%s", hdev->name);
5654 
5655 	bacpy(&ev.addr.bdaddr, bdaddr);
5656 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5657 	ev.passkey = __cpu_to_le32(passkey);
5658 	ev.entered = entered;
5659 
5660 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5661 }
5662 
5663 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5664 		      u8 addr_type, u8 status)
5665 {
5666 	struct mgmt_ev_auth_failed ev;
5667 
5668 	bacpy(&ev.addr.bdaddr, bdaddr);
5669 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5670 	ev.status = mgmt_status(status);
5671 
5672 	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5673 }
5674 
/* Completion of a Write Authentication Enable request triggered by the
 * Set Link Security mgmt command.
 *
 * On failure all pending SET_LINK_SECURITY commands get a command-status
 * error. On success the HCI_LINK_SECURITY dev flag is synced to the
 * controller's HCI_AUTH state, the pending commands get their settings
 * response, and New Settings is broadcast if the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's auth state into the mgmt setting flag;
	 * test_and_* tells us whether the setting actually changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first command's socket */
	if (match.sk)
		sock_put(match.sk);
}
5703 
5704 static void clear_eir(struct hci_request *req)
5705 {
5706 	struct hci_dev *hdev = req->hdev;
5707 	struct hci_cp_write_eir cp;
5708 
5709 	if (!lmp_ext_inq_capable(hdev))
5710 		return;
5711 
5712 	memset(hdev->eir, 0, sizeof(hdev->eir));
5713 
5714 	memset(&cp, 0, sizeof(cp));
5715 
5716 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
5717 }
5718 
/* Completion of a Write Simple Pairing Mode request (Set SSP mgmt
 * command).
 *
 * On failure: if we were enabling and the flag was already set, roll it
 * (and the dependent HCI_HS_ENABLED flag) back and broadcast New
 * Settings, then fail all pending SET_SSP commands. On success: sync
 * HCI_SSP_ENABLED to @enable, answer the pending commands, broadcast New
 * Settings on change, and update the EIR data (which only exists when
 * SSP is on).
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back an optimistic enable; HS depends on SSP so
		 * it must be cleared along with it.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* Disabling SSP also disables HS; "changed" must reflect
		 * either flag transitioning.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Rewrite or clear the EIR data to match the new SSP state */
	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}
5767 
/* Completion of a Write Secure Connections Support request (Set Secure
 * Connections mgmt command).
 *
 * On failure: roll back an optimistically set HCI_SC_ENABLED flag
 * (broadcasting New Settings if it was set), always drop HCI_SC_ONLY,
 * and fail the pending SET_SECURE_CONN commands. On success: sync
 * HCI_SC_ENABLED to @enable (clearing SC_ONLY when disabling), answer
 * the pending commands, and broadcast New Settings on change.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5804 
5805 static void sk_lookup(struct pending_cmd *cmd, void *data)
5806 {
5807 	struct cmd_lookup *match = data;
5808 
5809 	if (match->sk == NULL) {
5810 		match->sk = cmd->sk;
5811 		sock_hold(match->sk);
5812 	}
5813 }
5814 
/* Completion of a class-of-device update on the controller.
 *
 * The class can change via Set Device Class, Add UUID or Remove UUID, so
 * all three pending-command lists are scanned for the originating socket
 * (sk_lookup keeps the first one found). On success the new 3-byte
 * class is broadcast as Class Of Device Changed, skipping that socket.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	/* Drop the reference sk_lookup() took */
	if (match.sk)
		sock_put(match.sk);
}
5831 
/* Completion of a local-name update on the controller.
 *
 * Failures are silently ignored. If no Set Local Name command is
 * pending the change did not come from mgmt, so the cached dev_name is
 * updated here; in that case the event is also suppressed while a Set
 * Powered command is pending (name writes that are part of the power-on
 * sequence should not generate mgmt signals).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the initiating socket if the change came via mgmt */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
5858 
/* Completion of a Read Local OOB Data request.
 *
 * Answers the pending READ_LOCAL_OOB_DATA command. On success the
 * response format depends on Secure Connections: with SC enabled and
 * 256-bit values available the extended response carrying both the
 * P-192 and P-256 hash/randomizer pairs is used, otherwise only the
 * P-192 pair is returned.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
5905 
/* Send a Device Found event for a discovery result.
 *
 * The event is assembled in a stack buffer: fixed header, then @eir,
 * then (if not already present in the EIR data) an appended 5-byte
 * class-of-device field (1 length + 1 type + 3 data bytes), then the
 * scan response data. Dropped silently when discovery is not active or
 * the combined data would not fit in the buffer.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
		       u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
		       u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	if (!hci_discovery_active(hdev))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* If an IRK matches, report the identity address stored with it
	 * instead of the address the device was seen with.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device only if the EIR data doesn't
	 * already carry one.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
5957 
/* Report a resolved remote name as a Device Found event whose EIR data
 * holds just a Complete Local Name field. The buffer adds 2 bytes over
 * the maximum name for the EIR field's length and type octets.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
5980 
/* Report a discovery state change to user space.
 *
 * If the transition was requested via mgmt, complete the pending
 * Start/Stop Discovery command (returning the discovery type), then
 * broadcast a Discovering event with the current type and new state.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
6007 
6008 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6009 {
6010 	struct pending_cmd *cmd;
6011 	struct mgmt_ev_device_blocked ev;
6012 
6013 	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
6014 
6015 	bacpy(&ev.addr.bdaddr, bdaddr);
6016 	ev.addr.type = type;
6017 
6018 	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
6019 			  cmd ? cmd->sk : NULL);
6020 }
6021 
6022 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6023 {
6024 	struct pending_cmd *cmd;
6025 	struct mgmt_ev_device_unblocked ev;
6026 
6027 	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
6028 
6029 	bacpy(&ev.addr.bdaddr, bdaddr);
6030 	ev.addr.type = type;
6031 
6032 	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
6033 			  cmd ? cmd->sk : NULL);
6034 }
6035 
6036 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6037 {
6038 	BT_DBG("%s status %u", hdev->name, status);
6039 
6040 	/* Clear the advertising mgmt setting if we failed to re-enable it */
6041 	if (status) {
6042 		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6043 		new_settings(hdev, NULL);
6044 	}
6045 }
6046 
/* Re-enable LE advertising after it was implicitly stopped (e.g. by a
 * connection), but only once no LE links remain and only if the
 * advertising mgmt setting is still on. If the request cannot even be
 * submitted, the setting is cleared and user space informed; submission
 * failures after that are handled by adv_enable_complete().
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}
6068