xref: /openbmc/linux/net/bluetooth/mgmt.c (revision d2999e1b)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34 
35 #include "smp.h"
36 
37 #define MGMT_VERSION	1
38 #define MGMT_REVISION	6
39 
/* Management commands supported by this implementation, reported to
 * user space via the Read Management Supported Commands reply.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
};
89 
/* Management events this implementation can emit, reported to user
 * space via the Read Management Supported Commands reply (after the
 * command list).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
};
115 
116 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
117 
118 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
119 				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
120 
/* Book-keeping for a mgmt command that is still waiting for its HCI
 * completion before a response can be sent.
 */
struct pending_cmd {
	struct list_head list;	/* entry in hdev->mgmt_pending */
	u16 opcode;		/* MGMT_OP_* being processed */
	int index;		/* controller id the command targets */
	void *param;		/* copy of the command parameters */
	struct sock *sk;	/* originating socket; ref held via sock_hold() */
	void *user_data;	/* handler-private context */
};
129 
/* HCI to MGMT error code conversion table, indexed directly by the
 * HCI status code (see mgmt_status()); codes past the end of the
 * table map to MGMT_STATUS_FAILED.
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
194 
195 static u8 mgmt_status(u8 hci_status)
196 {
197 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
198 		return mgmt_status_table[hci_status];
199 
200 	return MGMT_STATUS_FAILED;
201 }
202 
203 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
204 {
205 	struct sk_buff *skb;
206 	struct mgmt_hdr *hdr;
207 	struct mgmt_ev_cmd_status *ev;
208 	int err;
209 
210 	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
211 
212 	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
213 	if (!skb)
214 		return -ENOMEM;
215 
216 	hdr = (void *) skb_put(skb, sizeof(*hdr));
217 
218 	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
219 	hdr->index = cpu_to_le16(index);
220 	hdr->len = cpu_to_le16(sizeof(*ev));
221 
222 	ev = (void *) skb_put(skb, sizeof(*ev));
223 	ev->status = status;
224 	ev->opcode = cpu_to_le16(cmd);
225 
226 	err = sock_queue_rcv_skb(sk, skb);
227 	if (err < 0)
228 		kfree_skb(skb);
229 
230 	return err;
231 }
232 
233 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
234 			void *rp, size_t rp_len)
235 {
236 	struct sk_buff *skb;
237 	struct mgmt_hdr *hdr;
238 	struct mgmt_ev_cmd_complete *ev;
239 	int err;
240 
241 	BT_DBG("sock %p", sk);
242 
243 	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
244 	if (!skb)
245 		return -ENOMEM;
246 
247 	hdr = (void *) skb_put(skb, sizeof(*hdr));
248 
249 	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
250 	hdr->index = cpu_to_le16(index);
251 	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
252 
253 	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
254 	ev->opcode = cpu_to_le16(cmd);
255 	ev->status = status;
256 
257 	if (rp)
258 		memcpy(ev->data, rp, rp_len);
259 
260 	err = sock_queue_rcv_skb(sk, skb);
261 	if (err < 0)
262 		kfree_skb(skb);
263 
264 	return err;
265 }
266 
267 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
268 			u16 data_len)
269 {
270 	struct mgmt_rp_read_version rp;
271 
272 	BT_DBG("sock %p", sk);
273 
274 	rp.version = MGMT_VERSION;
275 	rp.revision = cpu_to_le16(MGMT_REVISION);
276 
277 	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
278 			    sizeof(rp));
279 }
280 
281 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
282 			 u16 data_len)
283 {
284 	struct mgmt_rp_read_commands *rp;
285 	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
286 	const u16 num_events = ARRAY_SIZE(mgmt_events);
287 	__le16 *opcode;
288 	size_t rp_size;
289 	int i, err;
290 
291 	BT_DBG("sock %p", sk);
292 
293 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
294 
295 	rp = kmalloc(rp_size, GFP_KERNEL);
296 	if (!rp)
297 		return -ENOMEM;
298 
299 	rp->num_commands = cpu_to_le16(num_commands);
300 	rp->num_events = cpu_to_le16(num_events);
301 
302 	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
303 		put_unaligned_le16(mgmt_commands[i], opcode);
304 
305 	for (i = 0; i < num_events; i++, opcode++)
306 		put_unaligned_le16(mgmt_events[i], opcode);
307 
308 	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
309 			   rp_size);
310 	kfree(rp);
311 
312 	return err;
313 }
314 
/* Handle MGMT_OP_READ_INDEX_LIST: report the ids of all registered
 * BR/EDR controllers that are usable from mgmt.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count every BR/EDR controller.  Deliberately does
	 * NOT apply the HCI_SETUP/HCI_USER_CHANNEL filters used below,
	 * so the allocation is an upper bound even if those flag bits
	 * change between the two passes (the list itself is stable
	 * under the read lock, the atomic flag bits are not).
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the ids, skipping controllers that are
	 * still being set up or are bound to a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to what was actually filled in */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
367 
368 static u32 get_supported_settings(struct hci_dev *hdev)
369 {
370 	u32 settings = 0;
371 
372 	settings |= MGMT_SETTING_POWERED;
373 	settings |= MGMT_SETTING_PAIRABLE;
374 	settings |= MGMT_SETTING_DEBUG_KEYS;
375 
376 	if (lmp_bredr_capable(hdev)) {
377 		settings |= MGMT_SETTING_CONNECTABLE;
378 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
379 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
380 		settings |= MGMT_SETTING_DISCOVERABLE;
381 		settings |= MGMT_SETTING_BREDR;
382 		settings |= MGMT_SETTING_LINK_SECURITY;
383 
384 		if (lmp_ssp_capable(hdev)) {
385 			settings |= MGMT_SETTING_SSP;
386 			settings |= MGMT_SETTING_HS;
387 		}
388 
389 		if (lmp_sc_capable(hdev) ||
390 		    test_bit(HCI_FORCE_SC, &hdev->dev_flags))
391 			settings |= MGMT_SETTING_SECURE_CONN;
392 	}
393 
394 	if (lmp_le_capable(hdev)) {
395 		settings |= MGMT_SETTING_LE;
396 		settings |= MGMT_SETTING_ADVERTISING;
397 		settings |= MGMT_SETTING_PRIVACY;
398 	}
399 
400 	return settings;
401 }
402 
403 static u32 get_current_settings(struct hci_dev *hdev)
404 {
405 	u32 settings = 0;
406 
407 	if (hdev_is_powered(hdev))
408 		settings |= MGMT_SETTING_POWERED;
409 
410 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
411 		settings |= MGMT_SETTING_CONNECTABLE;
412 
413 	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
414 		settings |= MGMT_SETTING_FAST_CONNECTABLE;
415 
416 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
417 		settings |= MGMT_SETTING_DISCOVERABLE;
418 
419 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
420 		settings |= MGMT_SETTING_PAIRABLE;
421 
422 	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
423 		settings |= MGMT_SETTING_BREDR;
424 
425 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
426 		settings |= MGMT_SETTING_LE;
427 
428 	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
429 		settings |= MGMT_SETTING_LINK_SECURITY;
430 
431 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
432 		settings |= MGMT_SETTING_SSP;
433 
434 	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
435 		settings |= MGMT_SETTING_HS;
436 
437 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
438 		settings |= MGMT_SETTING_ADVERTISING;
439 
440 	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
441 		settings |= MGMT_SETTING_SECURE_CONN;
442 
443 	if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
444 		settings |= MGMT_SETTING_DEBUG_KEYS;
445 
446 	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
447 		settings |= MGMT_SETTING_PRIVACY;
448 
449 	return settings;
450 }
451 
452 #define PNP_INFO_SVCLASS_ID		0x1200
453 
/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data, using at most @len bytes.  If they do not all fit, the field
 * type is downgraded from EIR_UUID16_ALL to EIR_UUID16_SOME.  Returns
 * the new write position (one past the last byte written).
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit alias sits at offset 12 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		/* Skip values below the service-class range (0x1100+) */
		if (uuid16 < 0x1100)
			continue;

		/* PnP Information is conveyed via the Device ID EIR
		 * field instead (see create_eir()).
		 */
		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			/* Emit the field header on the first match; the
			 * length byte starts at 1 (the type byte) and
			 * grows as UUIDs are appended.
			 */
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		/* Little-endian on the wire */
		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
495 
496 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
497 {
498 	u8 *ptr = data, *uuids_start = NULL;
499 	struct bt_uuid *uuid;
500 
501 	if (len < 6)
502 		return ptr;
503 
504 	list_for_each_entry(uuid, &hdev->uuids, list) {
505 		if (uuid->size != 32)
506 			continue;
507 
508 		if (!uuids_start) {
509 			uuids_start = ptr;
510 			uuids_start[0] = 1;
511 			uuids_start[1] = EIR_UUID32_ALL;
512 			ptr += 2;
513 		}
514 
515 		/* Stop if not enough space to put next UUID */
516 		if ((ptr - data) + sizeof(u32) > len) {
517 			uuids_start[1] = EIR_UUID32_SOME;
518 			break;
519 		}
520 
521 		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
522 		ptr += sizeof(u32);
523 		uuids_start[0] += sizeof(u32);
524 	}
525 
526 	return ptr;
527 }
528 
529 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
530 {
531 	u8 *ptr = data, *uuids_start = NULL;
532 	struct bt_uuid *uuid;
533 
534 	if (len < 18)
535 		return ptr;
536 
537 	list_for_each_entry(uuid, &hdev->uuids, list) {
538 		if (uuid->size != 128)
539 			continue;
540 
541 		if (!uuids_start) {
542 			uuids_start = ptr;
543 			uuids_start[0] = 1;
544 			uuids_start[1] = EIR_UUID128_ALL;
545 			ptr += 2;
546 		}
547 
548 		/* Stop if not enough space to put next UUID */
549 		if ((ptr - data) + 16 > len) {
550 			uuids_start[1] = EIR_UUID128_SOME;
551 			break;
552 		}
553 
554 		memcpy(ptr, uuid->uuid, 16);
555 		ptr += 16;
556 		uuids_start[0] += 16;
557 	}
558 
559 	return ptr;
560 }
561 
562 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
563 {
564 	struct pending_cmd *cmd;
565 
566 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
567 		if (cmd->opcode == opcode)
568 			return cmd;
569 	}
570 
571 	return NULL;
572 }
573 
574 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
575 {
576 	u8 ad_len = 0;
577 	size_t name_len;
578 
579 	name_len = strlen(hdev->dev_name);
580 	if (name_len > 0) {
581 		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
582 
583 		if (name_len > max_len) {
584 			name_len = max_len;
585 			ptr[1] = EIR_NAME_SHORT;
586 		} else
587 			ptr[1] = EIR_NAME_COMPLETE;
588 
589 		ptr[0] = name_len + 1;
590 
591 		memcpy(ptr + 2, hdev->dev_name, name_len);
592 
593 		ad_len += (name_len + 2);
594 		ptr += (name_len + 2);
595 	}
596 
597 	return ad_len;
598 }
599 
/* Queue an LE Set Scan Response Data command with the current device
 * name, unless LE is disabled or the controller already has exactly
 * this data.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI command if nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache what is being programmed so the comparison above works
	 * on the next update.
	 */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
624 
625 static u8 get_adv_discov_flags(struct hci_dev *hdev)
626 {
627 	struct pending_cmd *cmd;
628 
629 	/* If there's a pending mgmt command the flags will not yet have
630 	 * their final values, so check for this first.
631 	 */
632 	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
633 	if (cmd) {
634 		struct mgmt_mode *cp = cmd->param;
635 		if (cp->val == 0x01)
636 			return LE_AD_GENERAL;
637 		else if (cp->val == 0x02)
638 			return LE_AD_LIMITED;
639 	} else {
640 		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
641 			return LE_AD_LIMITED;
642 		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
643 			return LE_AD_GENERAL;
644 	}
645 
646 	return 0;
647 }
648 
649 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
650 {
651 	u8 ad_len = 0, flags = 0;
652 
653 	flags |= get_adv_discov_flags(hdev);
654 
655 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
656 		flags |= LE_AD_NO_BREDR;
657 
658 	if (flags) {
659 		BT_DBG("adv flags 0x%02x", flags);
660 
661 		ptr[0] = 2;
662 		ptr[1] = EIR_FLAGS;
663 		ptr[2] = flags;
664 
665 		ad_len += 3;
666 		ptr += 3;
667 	}
668 
669 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
670 		ptr[0] = 2;
671 		ptr[1] = EIR_TX_POWER;
672 		ptr[2] = (u8) hdev->adv_tx_power;
673 
674 		ad_len += 3;
675 		ptr += 3;
676 	}
677 
678 	return ad_len;
679 }
680 
/* Queue an LE Set Advertising Data command with freshly generated
 * advertising data, unless LE is disabled or the controller already
 * has exactly this data.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI command if nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache what is being programmed so the comparison above works
	 * on the next update.
	 */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
705 
/* Build the full Extended Inquiry Response payload into @data: local
 * name, TX power, Device ID and the 16/32/128-bit service UUID lists.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type; names longer than 48 bytes are
		 * truncated and marked as shortened.
		 */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length (type byte plus name bytes) */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	/* TX power field, only when the controller reported a valid one */
	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID field: source, vendor, product, version — four
	 * little-endian 16-bit values after the 2-byte header.
	 */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;	/* field length: type byte + 8 data bytes */
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Each helper consumes only what fits in the remaining space */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
753 
/* Queue a Write Extended Inquiry Response command with regenerated
 * EIR data, if the controller is powered, EIR-capable, SSP is on, the
 * service cache is not active and the data actually changed.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	/* While the service cache is active, EIR updates are deferred
	 * (see service_cache_off()).
	 */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the HCI command if nothing changed */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
782 
783 static u8 get_service_classes(struct hci_dev *hdev)
784 {
785 	struct bt_uuid *uuid;
786 	u8 val = 0;
787 
788 	list_for_each_entry(uuid, &hdev->uuids, list)
789 		val |= uuid->svc_hint;
790 
791 	return val;
792 }
793 
/* Queue a Write Class of Device command reflecting the configured
 * major/minor class and the service classes derived from registered
 * UUIDs, if BR/EDR is enabled and the value actually changed.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Deferred while the service cache is active (see
	 * service_cache_off()).
	 */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited Discoverable Mode is signalled via bit 5 of the
	 * major class byte.
	 */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Skip the HCI command if nothing changed */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
822 
823 static bool get_connectable(struct hci_dev *hdev)
824 {
825 	struct pending_cmd *cmd;
826 
827 	/* If there's a pending mgmt command the flag will not yet have
828 	 * it's final value, so check for this first.
829 	 */
830 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
831 	if (cmd) {
832 		struct mgmt_mode *cp = cmd->param;
833 		return cp->val;
834 	}
835 
836 	return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
837 }
838 
/* Queue the commands to (re)enable LE advertising: set the random
 * address if needed, program advertising parameters, then enable.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	/* Connectable undirected vs. non-connectable advertising */
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
873 
/* Queue an LE Set Advertise Enable command that turns advertising off */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
880 
/* Delayed work that expires the service cache: once HCI_SERVICE_CACHE
 * is cleared, push the deferred EIR and Class of Device updates to the
 * controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Nothing to do if the cache was not active */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
901 
/* Delayed work run when the resolvable private address (RPA) times
 * out: mark it expired and, if we are currently advertising with no
 * LE connections, restart advertising so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Only restart advertising; with active LE connections the
	 * address cannot be changed here.
	 */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
927 
/* One-time mgmt takeover of a controller, triggered by the first mgmt
 * access.  Idempotent: guarded by the HCI_MGMT flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
943 
944 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
945 				void *data, u16 data_len)
946 {
947 	struct mgmt_rp_read_info rp;
948 
949 	BT_DBG("sock %p %s", sk, hdev->name);
950 
951 	hci_dev_lock(hdev);
952 
953 	memset(&rp, 0, sizeof(rp));
954 
955 	bacpy(&rp.bdaddr, &hdev->bdaddr);
956 
957 	rp.version = hdev->hci_ver;
958 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
959 
960 	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
961 	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
962 
963 	memcpy(rp.dev_class, hdev->dev_class, 3);
964 
965 	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
966 	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
967 
968 	hci_dev_unlock(hdev);
969 
970 	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
971 			    sizeof(rp));
972 }
973 
/* Release a pending command: drop the socket reference taken in
 * mgmt_pending_add() and free the parameter copy and the command.
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
980 
981 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
982 					    struct hci_dev *hdev, void *data,
983 					    u16 len)
984 {
985 	struct pending_cmd *cmd;
986 
987 	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
988 	if (!cmd)
989 		return NULL;
990 
991 	cmd->opcode = opcode;
992 	cmd->index = hdev->id;
993 
994 	cmd->param = kmalloc(len, GFP_KERNEL);
995 	if (!cmd->param) {
996 		kfree(cmd);
997 		return NULL;
998 	}
999 
1000 	if (data)
1001 		memcpy(cmd->param, data, len);
1002 
1003 	cmd->sk = sk;
1004 	sock_hold(sk);
1005 
1006 	list_add(&cmd->list, &hdev->mgmt_pending);
1007 
1008 	return cmd;
1009 }
1010 
1011 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1012 				 void (*cb)(struct pending_cmd *cmd,
1013 					    void *data),
1014 				 void *data)
1015 {
1016 	struct pending_cmd *cmd, *tmp;
1017 
1018 	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1019 		if (opcode > 0 && cmd->opcode != opcode)
1020 			continue;
1021 
1022 		cb(cmd, data);
1023 	}
1024 }
1025 
/* Unlink a pending command from its hdev list and free it */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1031 
1032 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1033 {
1034 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1035 
1036 	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1037 			    sizeof(settings));
1038 }
1039 
/* Completion callback for the power-off cleanup request: once no
 * connections remain, replace the delayed power-off with an immediate
 * one.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1049 
/* Queue the commands needed to stop whatever discovery activity is
 * currently in progress, depending on the discovery state machine.
 */
static void hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Either a BR/EDR inquiry or an LE scan is running */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		break;

	case DISCOVERY_RESOLVING:
		/* Cancel the outstanding remote name request, if any */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);
		break;
	}
}
1086 
/* Build and run a request that quiesces the controller before power
 * off: disable page/inquiry scan and advertising, stop discovery, and
 * tear down or reject every connection.  Returns the hci_req_run()
 * result (-ENODATA if nothing needed doing).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	/* Turn off page and inquiry scan if either is enabled */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	hci_stop_discovery(&req);

	/* Wind down every connection according to its current state */
	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established: disconnect cleanly */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing attempt: cancel its creation */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming attempt: reject it */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1139 
1140 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1141 		       u16 len)
1142 {
1143 	struct mgmt_mode *cp = data;
1144 	struct pending_cmd *cmd;
1145 	int err;
1146 
1147 	BT_DBG("request for %s", hdev->name);
1148 
1149 	if (cp->val != 0x00 && cp->val != 0x01)
1150 		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1151 				  MGMT_STATUS_INVALID_PARAMS);
1152 
1153 	hci_dev_lock(hdev);
1154 
1155 	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1156 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1157 				 MGMT_STATUS_BUSY);
1158 		goto failed;
1159 	}
1160 
1161 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1162 		cancel_delayed_work(&hdev->power_off);
1163 
1164 		if (cp->val) {
1165 			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1166 					 data, len);
1167 			err = mgmt_powered(hdev, 1);
1168 			goto failed;
1169 		}
1170 	}
1171 
1172 	if (!!cp->val == hdev_is_powered(hdev)) {
1173 		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1174 		goto failed;
1175 	}
1176 
1177 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1178 	if (!cmd) {
1179 		err = -ENOMEM;
1180 		goto failed;
1181 	}
1182 
1183 	if (cp->val) {
1184 		queue_work(hdev->req_workqueue, &hdev->power_on);
1185 		err = 0;
1186 	} else {
1187 		/* Disconnect connections, stop scans, etc */
1188 		err = clean_up_hci_state(hdev);
1189 		if (!err)
1190 			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1191 					   HCI_POWER_OFF_TIMEOUT);
1192 
1193 		/* ENODATA means there were no HCI commands queued */
1194 		if (err == -ENODATA) {
1195 			cancel_delayed_work(&hdev->power_off);
1196 			queue_work(hdev->req_workqueue, &hdev->power_off.work);
1197 			err = 0;
1198 		}
1199 	}
1200 
1201 failed:
1202 	hci_dev_unlock(hdev);
1203 	return err;
1204 }
1205 
1206 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1207 		      struct sock *skip_sk)
1208 {
1209 	struct sk_buff *skb;
1210 	struct mgmt_hdr *hdr;
1211 
1212 	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1213 	if (!skb)
1214 		return -ENOMEM;
1215 
1216 	hdr = (void *) skb_put(skb, sizeof(*hdr));
1217 	hdr->opcode = cpu_to_le16(event);
1218 	if (hdev)
1219 		hdr->index = cpu_to_le16(hdev->id);
1220 	else
1221 		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1222 	hdr->len = cpu_to_le16(data_len);
1223 
1224 	if (data)
1225 		memcpy(skb_put(skb, data_len), data, data_len);
1226 
1227 	/* Time stamp */
1228 	__net_timestamp(skb);
1229 
1230 	hci_send_to_control(skb, skip_sk);
1231 	kfree_skb(skb);
1232 
1233 	return 0;
1234 }
1235 
1236 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1237 {
1238 	__le32 ev;
1239 
1240 	ev = cpu_to_le32(get_current_settings(hdev));
1241 
1242 	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1243 }
1244 
/* Iteration context for mgmt_pending_foreach() callbacks: remembers
 * the first originating socket (with a held reference) so the caller
 * can send a single follow-up event that skips it.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1250 
1251 static void settings_rsp(struct pending_cmd *cmd, void *data)
1252 {
1253 	struct cmd_lookup *match = data;
1254 
1255 	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1256 
1257 	list_del(&cmd->list);
1258 
1259 	if (match->sk == NULL) {
1260 		match->sk = cmd->sk;
1261 		sock_hold(match->sk);
1262 	}
1263 
1264 	mgmt_pending_free(cmd);
1265 }
1266 
1267 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1268 {
1269 	u8 *status = data;
1270 
1271 	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1272 	mgmt_pending_remove(cmd);
1273 }
1274 
1275 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1276 {
1277 	if (!lmp_bredr_capable(hdev))
1278 		return MGMT_STATUS_NOT_SUPPORTED;
1279 	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1280 		return MGMT_STATUS_REJECTED;
1281 	else
1282 		return MGMT_STATUS_SUCCESS;
1283 }
1284 
1285 static u8 mgmt_le_support(struct hci_dev *hdev)
1286 {
1287 	if (!lmp_le_capable(hdev))
1288 		return MGMT_STATUS_NOT_SUPPORTED;
1289 	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1290 		return MGMT_STATUS_REJECTED;
1291 	else
1292 		return MGMT_STATUS_SUCCESS;
1293 }
1294 
/* HCI request completion handler for Set Discoverable: update the
 * HCI_DISCOVERABLE flag, arm the discoverable timeout if one was
 * requested, answer the pending mgmt command and refresh the class
 * of device (limited discoverable bit).
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the flag set optimistically in set_discoverable() */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timeout stored earlier by set_discoverable() */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1351 
/* Handle the MGMT Set Discoverable command.
 *
 * cp->val: 0x00 = off (timeout must be 0), 0x01 = general discoverable,
 * 0x02 = limited discoverable (timeout required). Powered-off and
 * timeout-only changes are handled without HCI traffic; otherwise an
 * HCI request is built and completed in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one of BR/EDR or LE must be enabled. */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1516 
1517 static void write_fast_connectable(struct hci_request *req, bool enable)
1518 {
1519 	struct hci_dev *hdev = req->hdev;
1520 	struct hci_cp_write_page_scan_activity acp;
1521 	u8 type;
1522 
1523 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1524 		return;
1525 
1526 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1527 		return;
1528 
1529 	if (enable) {
1530 		type = PAGE_SCAN_TYPE_INTERLACED;
1531 
1532 		/* 160 msec page scan interval */
1533 		acp.interval = cpu_to_le16(0x0100);
1534 	} else {
1535 		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
1536 
1537 		/* default 1.28 sec page scan */
1538 		acp.interval = cpu_to_le16(0x0800);
1539 	}
1540 
1541 	acp.window = cpu_to_le16(0x0012);
1542 
1543 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1544 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
1545 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1546 			    sizeof(acp), &acp);
1547 
1548 	if (hdev->page_scan_type != type)
1549 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1550 }
1551 
/* HCI request completion handler for Set Connectable: update the
 * HCI_CONNECTABLE flag and answer the pending mgmt command.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	/* Only broadcast New Settings when the flag actually flipped. */
	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1589 
1590 static int set_connectable_update_settings(struct hci_dev *hdev,
1591 					   struct sock *sk, u8 val)
1592 {
1593 	bool changed = false;
1594 	int err;
1595 
1596 	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1597 		changed = true;
1598 
1599 	if (val) {
1600 		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1601 	} else {
1602 		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1603 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1604 	}
1605 
1606 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1607 	if (err < 0)
1608 		return err;
1609 
1610 	if (changed)
1611 		return new_settings(hdev, sk);
1612 
1613 	return 0;
1614 }
1615 
/* Handle the MGMT Set Connectable command: toggle page scanning
 * (BR/EDR) and/or refresh advertising (LE) to match the requested
 * connectable state. Completion happens in set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one of BR/EDR or LE must be enabled. */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just flip the stored setting. */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Going non-connectable while discoverable:
			 * drop the pending discoverable timeout.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so it picks up the new connectable
	 * state, but only while no LE connection exists.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* No HCI commands queued: fall back to a pure settings
		 * update, same as the powered-off case.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1710 
1711 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1712 			u16 len)
1713 {
1714 	struct mgmt_mode *cp = data;
1715 	bool changed;
1716 	int err;
1717 
1718 	BT_DBG("request for %s", hdev->name);
1719 
1720 	if (cp->val != 0x00 && cp->val != 0x01)
1721 		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1722 				  MGMT_STATUS_INVALID_PARAMS);
1723 
1724 	hci_dev_lock(hdev);
1725 
1726 	if (cp->val)
1727 		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1728 	else
1729 		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1730 
1731 	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1732 	if (err < 0)
1733 		goto unlock;
1734 
1735 	if (changed)
1736 		err = new_settings(hdev, sk);
1737 
1738 unlock:
1739 	hci_dev_unlock(hdev);
1740 	return err;
1741 }
1742 
/* Handle the MGMT Set Link Security command: toggle BR/EDR
 * authentication via HCI Write Authentication Enable, or just the
 * stored flag while powered off.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored flag needs updating. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: no HCI command needed. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1812 
/* Handle the MGMT Set Secure Simple Pairing command. Disabling SSP
 * also clears High Speed, since HS depends on SSP.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored flags need updating. */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Dropping SSP drags HS down with it; report a
			 * change if either flag flipped.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: no HCI command needed. */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1890 
1891 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1892 {
1893 	struct mgmt_mode *cp = data;
1894 	bool changed;
1895 	u8 status;
1896 	int err;
1897 
1898 	BT_DBG("request for %s", hdev->name);
1899 
1900 	status = mgmt_bredr_support(hdev);
1901 	if (status)
1902 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1903 
1904 	if (!lmp_ssp_capable(hdev))
1905 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1906 				  MGMT_STATUS_NOT_SUPPORTED);
1907 
1908 	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1909 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1910 				  MGMT_STATUS_REJECTED);
1911 
1912 	if (cp->val != 0x00 && cp->val != 0x01)
1913 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1914 				  MGMT_STATUS_INVALID_PARAMS);
1915 
1916 	hci_dev_lock(hdev);
1917 
1918 	if (cp->val) {
1919 		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1920 	} else {
1921 		if (hdev_is_powered(hdev)) {
1922 			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1923 					 MGMT_STATUS_REJECTED);
1924 			goto unlock;
1925 		}
1926 
1927 		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1928 	}
1929 
1930 	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1931 	if (err < 0)
1932 		goto unlock;
1933 
1934 	if (changed)
1935 		err = new_settings(hdev, sk);
1936 
1937 unlock:
1938 	hci_dev_unlock(hdev);
1939 	return err;
1940 }
1941 
/* HCI request completion handler for Set LE: answer all pending
 * SET_LE commands and, when LE was enabled, refresh the advertising
 * and scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending SET_LE command with the HCI error. */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Drop the reference taken by settings_rsp(). */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
1979 
/* Handle the MGMT Set Low Energy command: toggle LE host support via
 * HCI Write LE Host Supported, or just the stored flag when powered
 * off or when the controller already matches the request.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Powered off, or controller already matches: update flags only. */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE implicitly disables advertising too. */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Advertising must be stopped before LE is disabled. */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2068 
2069 /* This is a helper function to test for pending mgmt commands that can
2070  * cause CoD or EIR HCI commands. We can only allow one such pending
2071  * mgmt command at a time since otherwise we cannot easily track what
2072  * the current values are, will be, and based on that calculate if a new
2073  * HCI command needs to be sent and if yes with what value.
2074  */
2075 static bool pending_eir_or_class(struct hci_dev *hdev)
2076 {
2077 	struct pending_cmd *cmd;
2078 
2079 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2080 		switch (cmd->opcode) {
2081 		case MGMT_OP_ADD_UUID:
2082 		case MGMT_OP_REMOVE_UUID:
2083 		case MGMT_OP_SET_DEV_CLASS:
2084 		case MGMT_OP_SET_POWERED:
2085 			return true;
2086 		}
2087 	}
2088 
2089 	return false;
2090 }
2091 
/* Bluetooth base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs are shortened
 * aliases of it with the value stored in the last four bytes.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2096 
2097 static u8 get_uuid_size(const u8 *uuid)
2098 {
2099 	u32 val;
2100 
2101 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2102 		return 128;
2103 
2104 	val = get_unaligned_le32(&uuid[12]);
2105 	if (val > 0xffff)
2106 		return 32;
2107 
2108 	return 16;
2109 }
2110 
2111 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2112 {
2113 	struct pending_cmd *cmd;
2114 
2115 	hci_dev_lock(hdev);
2116 
2117 	cmd = mgmt_pending_find(mgmt_op, hdev);
2118 	if (!cmd)
2119 		goto unlock;
2120 
2121 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2122 		     hdev->dev_class, 3);
2123 
2124 	mgmt_pending_remove(cmd);
2125 
2126 unlock:
2127 	hci_dev_unlock(hdev);
2128 }
2129 
/* HCI request completion handler for Add UUID. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2136 
/* Handle the MGMT Add UUID command: record the UUID and refresh class
 * of device and EIR data. A pending command is only created once the
 * HCI request is known to carry commands; -ENODATA (nothing to send)
 * is answered immediately.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI traffic needed: complete right away. */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2194 
2195 static bool enable_service_cache(struct hci_dev *hdev)
2196 {
2197 	if (!hdev_is_powered(hdev))
2198 		return false;
2199 
2200 	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2201 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2202 				   CACHE_TIMEOUT);
2203 		return true;
2204 	}
2205 
2206 	return false;
2207 }
2208 
/* HCI request completion handler for Remove UUID. */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2215 
/* Handle the MGMT Remove UUID command. An all-zero UUID clears every
 * recorded UUID; otherwise only matching entries are removed. Class
 * of device and EIR data are refreshed afterwards.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got (re-)armed, the flush work
		 * will emit the HCI commands later; complete now.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI traffic needed: complete right away. */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2293 
/* Request-complete callback for the HCI traffic started by
 * set_dev_class(); forwards the HCI status as the mgmt reply.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2300 
/* Set the major/minor device class.  The values are stored in hdev
 * unconditionally; if the controller is powered the new class (plus
 * refreshed EIR data when the service cache was pending) is written
 * to the controller and the reply is deferred to set_class_complete().
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and top three bits of major must be
	 * zero; anything else is rejected as invalid.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: nothing to send, reply with the stored class */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* Drop the lock around the synchronous cancel —
		 * NOTE(review): presumably because the service_cache
		 * work itself takes hci_dev_lock; confirm against
		 * service_cache_off().
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: the request was empty, reply right away */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2371 
2372 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2373 			  u16 len)
2374 {
2375 	struct mgmt_cp_load_link_keys *cp = data;
2376 	u16 key_count, expected_len;
2377 	bool changed;
2378 	int i;
2379 
2380 	BT_DBG("request for %s", hdev->name);
2381 
2382 	if (!lmp_bredr_capable(hdev))
2383 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2384 				  MGMT_STATUS_NOT_SUPPORTED);
2385 
2386 	key_count = __le16_to_cpu(cp->key_count);
2387 
2388 	expected_len = sizeof(*cp) + key_count *
2389 					sizeof(struct mgmt_link_key_info);
2390 	if (expected_len != len) {
2391 		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2392 		       expected_len, len);
2393 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2394 				  MGMT_STATUS_INVALID_PARAMS);
2395 	}
2396 
2397 	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2398 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2399 				  MGMT_STATUS_INVALID_PARAMS);
2400 
2401 	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2402 	       key_count);
2403 
2404 	for (i = 0; i < key_count; i++) {
2405 		struct mgmt_link_key_info *key = &cp->keys[i];
2406 
2407 		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2408 			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2409 					  MGMT_STATUS_INVALID_PARAMS);
2410 	}
2411 
2412 	hci_dev_lock(hdev);
2413 
2414 	hci_link_keys_clear(hdev);
2415 
2416 	if (cp->debug_keys)
2417 		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2418 	else
2419 		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2420 
2421 	if (changed)
2422 		new_settings(hdev, NULL);
2423 
2424 	for (i = 0; i < key_count; i++) {
2425 		struct mgmt_link_key_info *key = &cp->keys[i];
2426 
2427 		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2428 				 key->type, key->pin_len);
2429 	}
2430 
2431 	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2432 
2433 	hci_dev_unlock(hdev);
2434 
2435 	return 0;
2436 }
2437 
2438 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2439 			   u8 addr_type, struct sock *skip_sk)
2440 {
2441 	struct mgmt_ev_device_unpaired ev;
2442 
2443 	bacpy(&ev.addr.bdaddr, bdaddr);
2444 	ev.addr.type = addr_type;
2445 
2446 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2447 			  skip_sk);
2448 }
2449 
2450 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2451 			 u16 len)
2452 {
2453 	struct mgmt_cp_unpair_device *cp = data;
2454 	struct mgmt_rp_unpair_device rp;
2455 	struct hci_cp_disconnect dc;
2456 	struct pending_cmd *cmd;
2457 	struct hci_conn *conn;
2458 	int err;
2459 
2460 	memset(&rp, 0, sizeof(rp));
2461 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2462 	rp.addr.type = cp->addr.type;
2463 
2464 	if (!bdaddr_type_is_valid(cp->addr.type))
2465 		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2466 				    MGMT_STATUS_INVALID_PARAMS,
2467 				    &rp, sizeof(rp));
2468 
2469 	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2470 		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2471 				    MGMT_STATUS_INVALID_PARAMS,
2472 				    &rp, sizeof(rp));
2473 
2474 	hci_dev_lock(hdev);
2475 
2476 	if (!hdev_is_powered(hdev)) {
2477 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2478 				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2479 		goto unlock;
2480 	}
2481 
2482 	if (cp->addr.type == BDADDR_BREDR) {
2483 		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2484 	} else {
2485 		u8 addr_type;
2486 
2487 		if (cp->addr.type == BDADDR_LE_PUBLIC)
2488 			addr_type = ADDR_LE_DEV_PUBLIC;
2489 		else
2490 			addr_type = ADDR_LE_DEV_RANDOM;
2491 
2492 		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2493 
2494 		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2495 
2496 		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2497 	}
2498 
2499 	if (err < 0) {
2500 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2501 				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2502 		goto unlock;
2503 	}
2504 
2505 	if (cp->disconnect) {
2506 		if (cp->addr.type == BDADDR_BREDR)
2507 			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2508 						       &cp->addr.bdaddr);
2509 		else
2510 			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2511 						       &cp->addr.bdaddr);
2512 	} else {
2513 		conn = NULL;
2514 	}
2515 
2516 	if (!conn) {
2517 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2518 				   &rp, sizeof(rp));
2519 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2520 		goto unlock;
2521 	}
2522 
2523 	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2524 			       sizeof(*cp));
2525 	if (!cmd) {
2526 		err = -ENOMEM;
2527 		goto unlock;
2528 	}
2529 
2530 	dc.handle = cpu_to_le16(conn->handle);
2531 	dc.reason = 0x13; /* Remote User Terminated Connection */
2532 	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2533 	if (err < 0)
2534 		mgmt_pending_remove(cmd);
2535 
2536 unlock:
2537 	hci_dev_unlock(hdev);
2538 	return err;
2539 }
2540 
/* Disconnect a remote device.  Sends HCI_OP_DISCONNECT and defers the
 * mgmt reply (via the pending command) until the disconnection
 * completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the requested address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending per controller */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED mean no live link to disconnect */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2605 
2606 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2607 {
2608 	switch (link_type) {
2609 	case LE_LINK:
2610 		switch (addr_type) {
2611 		case ADDR_LE_DEV_PUBLIC:
2612 			return BDADDR_LE_PUBLIC;
2613 
2614 		default:
2615 			/* Fallback to LE Random address type */
2616 			return BDADDR_LE_RANDOM;
2617 		}
2618 
2619 	default:
2620 		/* Fallback to BR/EDR type */
2621 		return BDADDR_BREDR;
2622 	}
2623 }
2624 
2625 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2626 			   u16 data_len)
2627 {
2628 	struct mgmt_rp_get_connections *rp;
2629 	struct hci_conn *c;
2630 	size_t rp_len;
2631 	int err;
2632 	u16 i;
2633 
2634 	BT_DBG("");
2635 
2636 	hci_dev_lock(hdev);
2637 
2638 	if (!hdev_is_powered(hdev)) {
2639 		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2640 				 MGMT_STATUS_NOT_POWERED);
2641 		goto unlock;
2642 	}
2643 
2644 	i = 0;
2645 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2646 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2647 			i++;
2648 	}
2649 
2650 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2651 	rp = kmalloc(rp_len, GFP_KERNEL);
2652 	if (!rp) {
2653 		err = -ENOMEM;
2654 		goto unlock;
2655 	}
2656 
2657 	i = 0;
2658 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2659 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2660 			continue;
2661 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2662 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2663 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2664 			continue;
2665 		i++;
2666 	}
2667 
2668 	rp->conn_count = cpu_to_le16(i);
2669 
2670 	/* Recalculate length in case of filtered SCO connections, etc */
2671 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2672 
2673 	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2674 			   rp_len);
2675 
2676 	kfree(rp);
2677 
2678 unlock:
2679 	hci_dev_unlock(hdev);
2680 	return err;
2681 }
2682 
/* Queue a pending PIN_CODE_NEG_REPLY command and send the matching
 * HCI negative reply; the pending entry is dropped again if the HCI
 * send fails.  Called with hci_dev_lock held by the callers in this
 * file.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2701 
/* Reply to a PIN code request.  If the connection demands high
 * security but the supplied PIN is not 16 bytes, a negative reply is
 * sent to the controller instead and the command fails with
 * INVALID_PARAMS.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; reject anything
	 * shorter by sending a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2761 
2762 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2763 			     u16 len)
2764 {
2765 	struct mgmt_cp_set_io_capability *cp = data;
2766 
2767 	BT_DBG("");
2768 
2769 	hci_dev_lock(hdev);
2770 
2771 	hdev->io_capability = cp->io_capability;
2772 
2773 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2774 	       hdev->io_capability);
2775 
2776 	hci_dev_unlock(hdev);
2777 
2778 	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2779 			    0);
2780 }
2781 
2782 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2783 {
2784 	struct hci_dev *hdev = conn->hdev;
2785 	struct pending_cmd *cmd;
2786 
2787 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2788 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2789 			continue;
2790 
2791 		if (cmd->user_data != conn)
2792 			continue;
2793 
2794 		return cmd;
2795 	}
2796 
2797 	return NULL;
2798 }
2799 
/* Finish a PAIR_DEVICE command: send the reply, detach the mgmt
 * callbacks from the connection, drop the connection reference taken
 * at pairing start and free the pending command entry.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
2820 
2821 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2822 {
2823 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2824 	struct pending_cmd *cmd;
2825 
2826 	cmd = find_pairing(conn);
2827 	if (cmd)
2828 		pairing_complete(cmd, status);
2829 }
2830 
2831 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2832 {
2833 	struct pending_cmd *cmd;
2834 
2835 	BT_DBG("status %u", status);
2836 
2837 	cmd = find_pairing(conn);
2838 	if (!cmd)
2839 		BT_DBG("Unable to find a pending command");
2840 	else
2841 		pairing_complete(cmd, mgmt_status(status));
2842 }
2843 
2844 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2845 {
2846 	struct pending_cmd *cmd;
2847 
2848 	BT_DBG("status %u", status);
2849 
2850 	if (!status)
2851 		return;
2852 
2853 	cmd = find_pairing(conn);
2854 	if (!cmd)
2855 		BT_DBG("Unable to find a pending command");
2856 	else
2857 		pairing_complete(cmd, mgmt_status(status));
2858 }
2859 
/* Initiate pairing with a remote device.  Establishes (or reuses) a
 * connection, wires the pairing completion callbacks into it and
 * defers the mgmt reply until pairing finishes via pairing_complete().
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the requested address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means another user (e.g. a prior
	 * pairing) already owns this connection's callbacks.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: complete immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2961 
/* Cancel an in-progress PAIR_DEVICE command.  The original pairing
 * requester receives a CANCELLED reply via pairing_complete(); the
 * canceller gets its own command-complete with the address echoed.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The given address must match the pairing actually pending */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3003 
/* Common handler for the user confirmation / passkey (neg) reply
 * commands.  LE replies are routed through SMP and answered
 * synchronously; BR/EDR replies are forwarded to the controller via
 * the given hci_op with the reply deferred on a pending command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP. The hdev lock must be
		 * released as SMP may try to reacquire it for crypto
		 * purposes.
		 */
		hci_dev_unlock(hdev);
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		hci_dev_lock(hdev);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3078 
/* Negative reply to a PIN code request; thin wrapper around
 * user_pairing_resp().
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3090 
/* Positive reply to a user confirmation request; thin wrapper around
 * user_pairing_resp().
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	/* No variable-length part: len must match the struct exactly */
	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3106 
/* Negative reply to a user confirmation request; thin wrapper around
 * user_pairing_resp().
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3118 
/* Positive reply to a passkey request, carrying the passkey; thin
 * wrapper around user_pairing_resp().
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3130 
/* Negative reply to a passkey request; thin wrapper around
 * user_pairing_resp().
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3142 
3143 static void update_name(struct hci_request *req)
3144 {
3145 	struct hci_dev *hdev = req->hdev;
3146 	struct hci_cp_write_local_name cp;
3147 
3148 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3149 
3150 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3151 }
3152 
/* Request-complete callback for set_local_name(): reply to the
 * pending SET_LOCAL_NAME command with the stored parameters on
 * success or a translated status on failure.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed) */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3180 
/* Set the local device name and short name.  When powered, the name
 * is pushed to the controller (plus EIR / scan response updates as
 * applicable) with the reply deferred to set_name_complete(); when
 * powered off the value is only stored and a Local Name Changed event
 * is emitted.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	/* The short name never goes to the controller, store it now */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3249 
/* Read local OOB pairing data from the controller.  Uses the extended
 * variant when Secure Connections is enabled; the reply is delivered
 * later via the pending command when the HCI response arrives.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one OOB read may be outstanding at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3297 
/* Store remote OOB pairing data.  The command length selects between
 * the legacy (hash + randomizer) and extended (192- and 256-bit
 * variants) parameter formats; any other length is invalid.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3345 
3346 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3347 				  void *data, u16 len)
3348 {
3349 	struct mgmt_cp_remove_remote_oob_data *cp = data;
3350 	u8 status;
3351 	int err;
3352 
3353 	BT_DBG("%s", hdev->name);
3354 
3355 	hci_dev_lock(hdev);
3356 
3357 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3358 	if (err < 0)
3359 		status = MGMT_STATUS_INVALID_PARAMS;
3360 	else
3361 		status = MGMT_STATUS_SUCCESS;
3362 
3363 	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3364 			   status, &cp->addr, sizeof(cp->addr));
3365 
3366 	hci_dev_unlock(hdev);
3367 	return err;
3368 }
3369 
/* Fail an in-flight START_DISCOVERY: reset the discovery state and
 * reply to the pending command with the translated status and the
 * discovery type that was requested.
 */
static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	u8 type;
	int err;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

	return err;
}
3390 
/* HCI request callback for start_discovery().  On success, move to
 * the FINDING state and, for LE-based discovery types, schedule the
 * delayed work that will disable LE scanning after the timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	case DISCOV_TYPE_BREDR:
		/* BR/EDR inquiry ends on its own, no LE scan to stop */
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
3429 
/* Handler for MGMT_OP_START_DISCOVERY. Validates the adapter state and
 * the requested discovery type, registers a pending command, and builds
 * an HCI request: an inquiry for BR/EDR, or LE scan parameter/enable
 * commands for LE and interleaved discovery. On success the mgmt reply
 * is deferred to start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and regular discovery are mutually exclusive */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	/* Queue the type-specific HCI commands; every error path below must
	 * remove the pending command added above.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery additionally requires BR/EDR */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Active scanning is rejected while advertising is on */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* Submit the request; the final reply comes from the completion
	 * callback once the controller has answered.
	 */
	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3578 
3579 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3580 {
3581 	struct pending_cmd *cmd;
3582 	int err;
3583 
3584 	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3585 	if (!cmd)
3586 		return -ENOENT;
3587 
3588 	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3589 			   &hdev->discovery.type, sizeof(hdev->discovery.type));
3590 	mgmt_pending_remove(cmd);
3591 
3592 	return err;
3593 }
3594 
3595 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3596 {
3597 	BT_DBG("status %d", status);
3598 
3599 	hci_dev_lock(hdev);
3600 
3601 	if (status) {
3602 		mgmt_stop_discovery_failed(hdev, status);
3603 		goto unlock;
3604 	}
3605 
3606 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3607 
3608 unlock:
3609 	hci_dev_unlock(hdev);
3610 }
3611 
/* Handler for MGMT_OP_STOP_DISCOVERY. Verifies that a discovery session
 * of the requested type is running, then queues an HCI request to stop
 * it. The mgmt reply is normally deferred to stop_discovery_complete();
 * if no HCI commands were needed the reply is sent immediately.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the session actually in progress */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3667 
3668 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3669 			u16 len)
3670 {
3671 	struct mgmt_cp_confirm_name *cp = data;
3672 	struct inquiry_entry *e;
3673 	int err;
3674 
3675 	BT_DBG("%s", hdev->name);
3676 
3677 	hci_dev_lock(hdev);
3678 
3679 	if (!hci_discovery_active(hdev)) {
3680 		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3681 				   MGMT_STATUS_FAILED, &cp->addr,
3682 				   sizeof(cp->addr));
3683 		goto failed;
3684 	}
3685 
3686 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3687 	if (!e) {
3688 		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3689 				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3690 				   sizeof(cp->addr));
3691 		goto failed;
3692 	}
3693 
3694 	if (cp->name_known) {
3695 		e->name_state = NAME_KNOWN;
3696 		list_del(&e->list);
3697 	} else {
3698 		e->name_state = NAME_NEEDED;
3699 		hci_inquiry_cache_update_resolve(hdev, e);
3700 	}
3701 
3702 	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3703 			   sizeof(cp->addr));
3704 
3705 failed:
3706 	hci_dev_unlock(hdev);
3707 	return err;
3708 }
3709 
3710 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3711 			u16 len)
3712 {
3713 	struct mgmt_cp_block_device *cp = data;
3714 	u8 status;
3715 	int err;
3716 
3717 	BT_DBG("%s", hdev->name);
3718 
3719 	if (!bdaddr_type_is_valid(cp->addr.type))
3720 		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3721 				    MGMT_STATUS_INVALID_PARAMS,
3722 				    &cp->addr, sizeof(cp->addr));
3723 
3724 	hci_dev_lock(hdev);
3725 
3726 	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3727 	if (err < 0)
3728 		status = MGMT_STATUS_FAILED;
3729 	else
3730 		status = MGMT_STATUS_SUCCESS;
3731 
3732 	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3733 			   &cp->addr, sizeof(cp->addr));
3734 
3735 	hci_dev_unlock(hdev);
3736 
3737 	return err;
3738 }
3739 
3740 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3741 			  u16 len)
3742 {
3743 	struct mgmt_cp_unblock_device *cp = data;
3744 	u8 status;
3745 	int err;
3746 
3747 	BT_DBG("%s", hdev->name);
3748 
3749 	if (!bdaddr_type_is_valid(cp->addr.type))
3750 		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3751 				    MGMT_STATUS_INVALID_PARAMS,
3752 				    &cp->addr, sizeof(cp->addr));
3753 
3754 	hci_dev_lock(hdev);
3755 
3756 	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3757 	if (err < 0)
3758 		status = MGMT_STATUS_INVALID_PARAMS;
3759 	else
3760 		status = MGMT_STATUS_SUCCESS;
3761 
3762 	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3763 			   &cp->addr, sizeof(cp->addr));
3764 
3765 	hci_dev_unlock(hdev);
3766 
3767 	return err;
3768 }
3769 
3770 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3771 			 u16 len)
3772 {
3773 	struct mgmt_cp_set_device_id *cp = data;
3774 	struct hci_request req;
3775 	int err;
3776 	__u16 source;
3777 
3778 	BT_DBG("%s", hdev->name);
3779 
3780 	source = __le16_to_cpu(cp->source);
3781 
3782 	if (source > 0x0002)
3783 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3784 				  MGMT_STATUS_INVALID_PARAMS);
3785 
3786 	hci_dev_lock(hdev);
3787 
3788 	hdev->devid_source = source;
3789 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3790 	hdev->devid_product = __le16_to_cpu(cp->product);
3791 	hdev->devid_version = __le16_to_cpu(cp->version);
3792 
3793 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3794 
3795 	hci_req_init(&req, hdev);
3796 	update_eir(&req);
3797 	hci_req_run(&req, NULL);
3798 
3799 	hci_dev_unlock(hdev);
3800 
3801 	return err;
3802 }
3803 
3804 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3805 {
3806 	struct cmd_lookup match = { NULL, hdev };
3807 
3808 	if (status) {
3809 		u8 mgmt_err = mgmt_status(status);
3810 
3811 		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3812 				     cmd_status_rsp, &mgmt_err);
3813 		return;
3814 	}
3815 
3816 	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3817 			     &match);
3818 
3819 	new_settings(hdev, match.sk);
3820 
3821 	if (match.sk)
3822 		sock_put(match.sk);
3823 }
3824 
/* Handler for MGMT_OP_SET_ADVERTISING. When no HCI traffic is required
 * (adapter off, no state change, or LE connections present) the flag is
 * toggled directly and the reply sent immediately; otherwise an HCI
 * request is queued and the reply deferred to set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast the new settings only on an actual change */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Conflicting advertising or LE state changes must not overlap */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3902 
3903 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3904 			      void *data, u16 len)
3905 {
3906 	struct mgmt_cp_set_static_address *cp = data;
3907 	int err;
3908 
3909 	BT_DBG("%s", hdev->name);
3910 
3911 	if (!lmp_le_capable(hdev))
3912 		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3913 				  MGMT_STATUS_NOT_SUPPORTED);
3914 
3915 	if (hdev_is_powered(hdev))
3916 		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3917 				  MGMT_STATUS_REJECTED);
3918 
3919 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3920 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3921 			return cmd_status(sk, hdev->id,
3922 					  MGMT_OP_SET_STATIC_ADDRESS,
3923 					  MGMT_STATUS_INVALID_PARAMS);
3924 
3925 		/* Two most significant bits shall be set */
3926 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3927 			return cmd_status(sk, hdev->id,
3928 					  MGMT_OP_SET_STATIC_ADDRESS,
3929 					  MGMT_STATUS_INVALID_PARAMS);
3930 	}
3931 
3932 	hci_dev_lock(hdev);
3933 
3934 	bacpy(&hdev->static_addr, &cp->bdaddr);
3935 
3936 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3937 
3938 	hci_dev_unlock(hdev);
3939 
3940 	return err;
3941 }
3942 
3943 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3944 			   void *data, u16 len)
3945 {
3946 	struct mgmt_cp_set_scan_params *cp = data;
3947 	__u16 interval, window;
3948 	int err;
3949 
3950 	BT_DBG("%s", hdev->name);
3951 
3952 	if (!lmp_le_capable(hdev))
3953 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3954 				  MGMT_STATUS_NOT_SUPPORTED);
3955 
3956 	interval = __le16_to_cpu(cp->interval);
3957 
3958 	if (interval < 0x0004 || interval > 0x4000)
3959 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3960 				  MGMT_STATUS_INVALID_PARAMS);
3961 
3962 	window = __le16_to_cpu(cp->window);
3963 
3964 	if (window < 0x0004 || window > 0x4000)
3965 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3966 				  MGMT_STATUS_INVALID_PARAMS);
3967 
3968 	if (window > interval)
3969 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3970 				  MGMT_STATUS_INVALID_PARAMS);
3971 
3972 	hci_dev_lock(hdev);
3973 
3974 	hdev->le_scan_interval = interval;
3975 	hdev->le_scan_window = window;
3976 
3977 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3978 
3979 	/* If background scan is running, restart it so new parameters are
3980 	 * loaded.
3981 	 */
3982 	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
3983 	    hdev->discovery.state == DISCOVERY_STOPPED) {
3984 		struct hci_request req;
3985 
3986 		hci_req_init(&req, hdev);
3987 
3988 		hci_req_add_le_scan_disable(&req);
3989 		hci_req_add_le_passive_scan(&req);
3990 
3991 		hci_req_run(&req, NULL);
3992 	}
3993 
3994 	hci_dev_unlock(hdev);
3995 
3996 	return err;
3997 }
3998 
3999 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4000 {
4001 	struct pending_cmd *cmd;
4002 
4003 	BT_DBG("status 0x%02x", status);
4004 
4005 	hci_dev_lock(hdev);
4006 
4007 	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4008 	if (!cmd)
4009 		goto unlock;
4010 
4011 	if (status) {
4012 		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4013 			   mgmt_status(status));
4014 	} else {
4015 		struct mgmt_mode *cp = cmd->param;
4016 
4017 		if (cp->val)
4018 			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4019 		else
4020 			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4021 
4022 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4023 		new_settings(hdev, cmd->sk);
4024 	}
4025 
4026 	mgmt_pending_remove(cmd);
4027 
4028 unlock:
4029 	hci_dev_unlock(hdev);
4030 }
4031 
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE. Requires BR/EDR enabled on
 * a >= 1.2 controller that is powered and connectable. The page scan
 * parameters are changed via an HCI request; the flag itself is only
 * committed in fast_connectable_complete() once the request succeeds.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Only one SET_FAST_CONNECTABLE request may be in flight */
	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested: reply immediately without HCI traffic */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4096 
4097 static void set_bredr_scan(struct hci_request *req)
4098 {
4099 	struct hci_dev *hdev = req->hdev;
4100 	u8 scan = 0;
4101 
4102 	/* Ensure that fast connectable is disabled. This function will
4103 	 * not do anything if the page scan parameters are already what
4104 	 * they should be.
4105 	 */
4106 	write_fast_connectable(req, false);
4107 
4108 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4109 		scan |= SCAN_PAGE;
4110 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4111 		scan |= SCAN_INQUIRY;
4112 
4113 	if (scan)
4114 		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4115 }
4116 
4117 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4118 {
4119 	struct pending_cmd *cmd;
4120 
4121 	BT_DBG("status 0x%02x", status);
4122 
4123 	hci_dev_lock(hdev);
4124 
4125 	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4126 	if (!cmd)
4127 		goto unlock;
4128 
4129 	if (status) {
4130 		u8 mgmt_err = mgmt_status(status);
4131 
4132 		/* We need to restore the flag if related HCI commands
4133 		 * failed.
4134 		 */
4135 		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4136 
4137 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4138 	} else {
4139 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4140 		new_settings(hdev, cmd->sk);
4141 	}
4142 
4143 	mgmt_pending_remove(cmd);
4144 
4145 unlock:
4146 	hci_dev_unlock(hdev);
4147 }
4148 
/* Handler for MGMT_OP_SET_BREDR on a dual-mode (BR/EDR + LE) controller.
 * While powered off the flag (and, when disabling, the dependent BR/EDR
 * flags) are toggled directly; while powered on only enabling is
 * allowed, via an HCI request whose outcome is handled in
 * set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* BR/EDR can only be toggled while LE is enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: reply immediately */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4238 
/* Handler for MGMT_OP_SET_SECURE_CONN. Value 0x00 disables Secure
 * Connections, 0x01 enables it, 0x02 enables SC-only mode. While
 * powered off the flags are toggled directly; while powered on the
 * controller is updated with a write-SC-support HCI command.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* Controller support may be forced via the HCI_FORCE_SC debug flag */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Both SC-enabled and SC-only state already match: no HCI needed */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* SC-only mode is tracked in the flags immediately after the
	 * command was queued (the pending cmd handles the final reply).
	 */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4326 
4327 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4328 			  void *data, u16 len)
4329 {
4330 	struct mgmt_mode *cp = data;
4331 	bool changed;
4332 	int err;
4333 
4334 	BT_DBG("request for %s", hdev->name);
4335 
4336 	if (cp->val != 0x00 && cp->val != 0x01)
4337 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4338 				  MGMT_STATUS_INVALID_PARAMS);
4339 
4340 	hci_dev_lock(hdev);
4341 
4342 	if (cp->val)
4343 		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4344 	else
4345 		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4346 
4347 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4348 	if (err < 0)
4349 		goto unlock;
4350 
4351 	if (changed)
4352 		err = new_settings(hdev, sk);
4353 
4354 unlock:
4355 	hci_dev_unlock(hdev);
4356 	return err;
4357 }
4358 
/* Handler for MGMT_OP_SET_PRIVACY: enable or disable LE privacy,
 * storing (or clearing) the local IRK. Only allowed while the adapter
 * is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		/* Store the new local IRK and force a fresh RPA */
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		/* Privacy off: wipe the stored IRK */
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4408 
4409 static bool irk_is_valid(struct mgmt_irk_info *irk)
4410 {
4411 	switch (irk->addr.type) {
4412 	case BDADDR_LE_PUBLIC:
4413 		return true;
4414 
4415 	case BDADDR_LE_RANDOM:
4416 		/* Two most significant bits shall be set */
4417 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4418 			return false;
4419 		return true;
4420 	}
4421 
4422 	return false;
4423 }
4424 
4425 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4426 		     u16 len)
4427 {
4428 	struct mgmt_cp_load_irks *cp = cp_data;
4429 	u16 irk_count, expected_len;
4430 	int i, err;
4431 
4432 	BT_DBG("request for %s", hdev->name);
4433 
4434 	if (!lmp_le_capable(hdev))
4435 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4436 				  MGMT_STATUS_NOT_SUPPORTED);
4437 
4438 	irk_count = __le16_to_cpu(cp->irk_count);
4439 
4440 	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4441 	if (expected_len != len) {
4442 		BT_ERR("load_irks: expected %u bytes, got %u bytes",
4443 		       expected_len, len);
4444 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4445 				  MGMT_STATUS_INVALID_PARAMS);
4446 	}
4447 
4448 	BT_DBG("%s irk_count %u", hdev->name, irk_count);
4449 
4450 	for (i = 0; i < irk_count; i++) {
4451 		struct mgmt_irk_info *key = &cp->irks[i];
4452 
4453 		if (!irk_is_valid(key))
4454 			return cmd_status(sk, hdev->id,
4455 					  MGMT_OP_LOAD_IRKS,
4456 					  MGMT_STATUS_INVALID_PARAMS);
4457 	}
4458 
4459 	hci_dev_lock(hdev);
4460 
4461 	hci_smp_irks_clear(hdev);
4462 
4463 	for (i = 0; i < irk_count; i++) {
4464 		struct mgmt_irk_info *irk = &cp->irks[i];
4465 		u8 addr_type;
4466 
4467 		if (irk->addr.type == BDADDR_LE_PUBLIC)
4468 			addr_type = ADDR_LE_DEV_PUBLIC;
4469 		else
4470 			addr_type = ADDR_LE_DEV_RANDOM;
4471 
4472 		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4473 			    BDADDR_ANY);
4474 	}
4475 
4476 	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4477 
4478 	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4479 
4480 	hci_dev_unlock(hdev);
4481 
4482 	return err;
4483 }
4484 
4485 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4486 {
4487 	if (key->master != 0x00 && key->master != 0x01)
4488 		return false;
4489 
4490 	switch (key->addr.type) {
4491 	case BDADDR_LE_PUBLIC:
4492 		return true;
4493 
4494 	case BDADDR_LE_RANDOM:
4495 		/* Two most significant bits shall be set */
4496 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4497 			return false;
4498 		return true;
4499 	}
4500 
4501 	return false;
4502 }
4503 
4504 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4505 			       void *cp_data, u16 len)
4506 {
4507 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
4508 	u16 key_count, expected_len;
4509 	int i, err;
4510 
4511 	BT_DBG("request for %s", hdev->name);
4512 
4513 	if (!lmp_le_capable(hdev))
4514 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4515 				  MGMT_STATUS_NOT_SUPPORTED);
4516 
4517 	key_count = __le16_to_cpu(cp->key_count);
4518 
4519 	expected_len = sizeof(*cp) + key_count *
4520 					sizeof(struct mgmt_ltk_info);
4521 	if (expected_len != len) {
4522 		BT_ERR("load_keys: expected %u bytes, got %u bytes",
4523 		       expected_len, len);
4524 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4525 				  MGMT_STATUS_INVALID_PARAMS);
4526 	}
4527 
4528 	BT_DBG("%s key_count %u", hdev->name, key_count);
4529 
4530 	for (i = 0; i < key_count; i++) {
4531 		struct mgmt_ltk_info *key = &cp->keys[i];
4532 
4533 		if (!ltk_is_valid(key))
4534 			return cmd_status(sk, hdev->id,
4535 					  MGMT_OP_LOAD_LONG_TERM_KEYS,
4536 					  MGMT_STATUS_INVALID_PARAMS);
4537 	}
4538 
4539 	hci_dev_lock(hdev);
4540 
4541 	hci_smp_ltks_clear(hdev);
4542 
4543 	for (i = 0; i < key_count; i++) {
4544 		struct mgmt_ltk_info *key = &cp->keys[i];
4545 		u8 type, addr_type, authenticated;
4546 
4547 		if (key->addr.type == BDADDR_LE_PUBLIC)
4548 			addr_type = ADDR_LE_DEV_PUBLIC;
4549 		else
4550 			addr_type = ADDR_LE_DEV_RANDOM;
4551 
4552 		if (key->master)
4553 			type = HCI_SMP_LTK;
4554 		else
4555 			type = HCI_SMP_LTK_SLAVE;
4556 
4557 		switch (key->type) {
4558 		case MGMT_LTK_UNAUTHENTICATED:
4559 			authenticated = 0x00;
4560 			break;
4561 		case MGMT_LTK_AUTHENTICATED:
4562 			authenticated = 0x01;
4563 			break;
4564 		default:
4565 			continue;
4566 		}
4567 
4568 		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4569 			    authenticated, key->val, key->enc_size, key->ediv,
4570 			    key->rand);
4571 	}
4572 
4573 	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4574 			   NULL, 0);
4575 
4576 	hci_dev_unlock(hdev);
4577 
4578 	return err;
4579 }
4580 
/* Context passed to get_conn_info_complete() when iterating pending
 * Get Connection Information commands after a cache refresh request.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;	/* only commands for this connection reply */
	bool valid_tx_power;	/* TX power values in hci_conn are fresh */
	u8 mgmt_status;		/* status to report back to userspace */
};
4586 
/* mgmt_pending_foreach() callback: complete a pending Get Connection
 * Information command for the connection stored in the lookup data.
 * Commands for other connections are left untouched.
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	/* Only reply for the connection this refresh was about */
	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		/* TX power is only reported when the refresh request read
		 * it successfully; otherwise mark it invalid.
		 */
		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	/* Drop the reference taken when the command was queued */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4622 
/* Completion callback for the Read RSSI / Read TX Power request built
 * by get_conn_info(). Records whether the TX power values are valid and
 * replies to the pending mgmt command(s) for the affected connection.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4680 
/* Handle the Get Connection Information mgmt command. Replies
 * immediately with cached RSSI/TX power values when they are recent
 * enough, otherwise queues an HCI request to refresh them and defers
 * the reply to conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* The reply always echoes the requested address, even on error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		/* Read RSSI is always first in the request - the completion
		 * callback relies on that ordering when recovering the
		 * connection handle.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the refresh completes;
		 * get_conn_info_complete() drops this reference.
		 */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4792 
/* Table of mgmt command handlers, indexed by command opcode (see the
 * dispatch in mgmt_control()). data_len is the exact expected parameter
 * length, or the minimum length when var_len is set (commands carrying
 * a variable number of trailing records).
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
};
4850 
4851 
/* Entry point for mgmt commands arriving on an HCI control socket.
 * Validates the message header, resolves the target controller (if
 * any), dispatches through mgmt_handlers[] and returns the number of
 * consumed bytes on success or a negative error.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header-declared length must match the actual payload size */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup or claimed by a user channel
		 * are not accessible through the mgmt interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Opcodes below MGMT_OP_READ_INFO are global (no controller index),
	 * all others require a valid index - reject any mismatch.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Variable-length commands need at least data_len bytes, fixed-size
	 * commands exactly data_len bytes.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
4944 
4945 void mgmt_index_added(struct hci_dev *hdev)
4946 {
4947 	if (hdev->dev_type != HCI_BREDR)
4948 		return;
4949 
4950 	mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4951 }
4952 
4953 void mgmt_index_removed(struct hci_dev *hdev)
4954 {
4955 	u8 status = MGMT_STATUS_INVALID_INDEX;
4956 
4957 	if (hdev->dev_type != HCI_BREDR)
4958 		return;
4959 
4960 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4961 
4962 	mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4963 }
4964 
4965 /* This function requires the caller holds hdev->lock */
4966 static void restart_le_auto_conns(struct hci_dev *hdev)
4967 {
4968 	struct hci_conn_params *p;
4969 
4970 	list_for_each_entry(p, &hdev->le_conn_params, list) {
4971 		if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
4972 			hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
4973 	}
4974 }
4975 
/* Completion callback for the power-on HCI request built by
 * powered_update_hci(). Restarts LE auto-connections, answers pending
 * Set Powered commands and broadcasts the new settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_auto_conns(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp may have stored a socket reference in match.sk */
	if (match.sk)
		sock_put(match.sk);
}
4995 
/* Build and run an HCI request bringing the controller in line with
 * the current dev_flags after power on: SSP mode, LE host support,
 * advertising data, link security, scan mode, class, name and EIR.
 * Returns the hci_req_run() result.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP in the controller if requested but not yet active */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the authentication-enable setting with the link security flag */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5055 
/* Handle a power state change of the controller. On power on, try to
 * program the controller via powered_update_hci() and let its
 * completion callback reply; if no HCI commands were needed, or on
 * power off, reply to pending commands here and emit New Settings.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* 0 means commands were queued; powered_complete() will
		 * take care of responding once they finish.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	/* Powering off: answer Set Powered commands, fail everything else */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
5090 
5091 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5092 {
5093 	struct pending_cmd *cmd;
5094 	u8 status;
5095 
5096 	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5097 	if (!cmd)
5098 		return;
5099 
5100 	if (err == -ERFKILL)
5101 		status = MGMT_STATUS_RFKILLED;
5102 	else
5103 		status = MGMT_STATUS_FAILED;
5104 
5105 	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5106 
5107 	mgmt_pending_remove(cmd);
5108 }
5109 
/* Called when the discoverable timeout fires. Clears the discoverable
 * flags, restores page-scan-only mode (for BR/EDR), refreshes the class
 * and advertising data, and broadcasts the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
5140 
/* Sync the HCI_DISCOVERABLE flag with a controller-reported change and,
 * if the state actually changed, refresh advertising data and broadcast
 * the new settings.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	/* Powering off may clear the scan mode - don't let that interfere */
	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		/* Limited discoverable cannot outlive general discoverable */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
5177 
5178 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5179 {
5180 	bool changed;
5181 
5182 	/* Nothing needed here if there's a pending command since that
5183 	 * commands request completion callback takes care of everything
5184 	 * necessary.
5185 	 */
5186 	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5187 		return;
5188 
5189 	/* Powering off may clear the scan mode - don't let that interfere */
5190 	if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5191 		return;
5192 
5193 	if (connectable)
5194 		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5195 	else
5196 		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5197 
5198 	if (changed)
5199 		new_settings(hdev, NULL);
5200 }
5201 
5202 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5203 {
5204 	/* Powering off may stop advertising - don't let that interfere */
5205 	if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5206 		return;
5207 
5208 	if (advertising)
5209 		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5210 	else
5211 		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5212 }
5213 
/* A Write Scan Enable command failed: propagate the failure to the
 * pending settings command responsible for each scan bit involved
 * (page scan -> Set Connectable, inquiry scan -> Set Discoverable).
 */
void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
{
	u8 mgmt_err = mgmt_status(status);

	if (scan & SCAN_PAGE)
		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
				     cmd_status_rsp, &mgmt_err);

	if (scan & SCAN_INQUIRY)
		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
				     cmd_status_rsp, &mgmt_err);
}
5226 
5227 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5228 		       bool persistent)
5229 {
5230 	struct mgmt_ev_new_link_key ev;
5231 
5232 	memset(&ev, 0, sizeof(ev));
5233 
5234 	ev.store_hint = persistent;
5235 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5236 	ev.key.addr.type = BDADDR_BREDR;
5237 	ev.key.type = key->type;
5238 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5239 	ev.key.pin_len = key->pin_len;
5240 
5241 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
5242 }
5243 
5244 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5245 {
5246 	if (ltk->authenticated)
5247 		return MGMT_LTK_AUTHENTICATED;
5248 
5249 	return MGMT_LTK_UNAUTHENTICATED;
5250 }
5251 
/* Send a New Long Term Key event so userspace can decide whether to
 * persist the key.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
5289 
5290 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
5291 {
5292 	struct mgmt_ev_new_irk ev;
5293 
5294 	memset(&ev, 0, sizeof(ev));
5295 
5296 	/* For identity resolving keys from devices that are already
5297 	 * using a public address or static random address, do not
5298 	 * ask for storing this key. The identity resolving key really
5299 	 * is only mandatory for devices using resovlable random
5300 	 * addresses.
5301 	 *
5302 	 * Storing all identity resolving keys has the downside that
5303 	 * they will be also loaded on next boot of they system. More
5304 	 * identity resolving keys, means more time during scanning is
5305 	 * needed to actually resolve these addresses.
5306 	 */
5307 	if (bacmp(&irk->rpa, BDADDR_ANY))
5308 		ev.store_hint = 0x01;
5309 	else
5310 		ev.store_hint = 0x00;
5311 
5312 	bacpy(&ev.rpa, &irk->rpa);
5313 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
5314 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
5315 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
5316 
5317 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
5318 }
5319 
/* Send a New Connection Signature Resolving Key event so userspace can
 * decide whether to persist the key.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
5349 
5350 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5351 				  u8 data_len)
5352 {
5353 	eir[eir_len++] = sizeof(type) + data_len;
5354 	eir[eir_len++] = type;
5355 	memcpy(&eir[eir_len], data, data_len);
5356 	eir_len += data_len;
5357 
5358 	return eir_len;
5359 }
5360 
/* Emit a Device Connected event, appending the remote name and class
 * of device as EIR-formatted data when available.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	/* NOTE(review): 512 bytes must cover sizeof(*ev) plus the EIR
	 * data; name_len is a u8 (<= 255) and the class field adds 5
	 * bytes, so this appears to fit - confirm if sizes change.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Skip the class field when it is all zero */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
5387 
/* mgmt_pending_foreach() callback: complete a pending Disconnect
 * command with success and hand the command's socket back to the
 * caller (with a reference held) via the data pointer.
 */
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	/* The caller is responsible for dropping this reference */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
5405 
/* mgmt_pending_foreach() callback: emit the Device Unpaired event and
 * complete a pending Unpair Device command with success.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5422 
/* Handle a device disconnection: possibly expedite a pending power off,
 * answer pending Disconnect/Unpair Device commands, and emit the Device
 * Disconnected event (skipping the socket that requested it).
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	/* Only connections known to mgmt generate disconnect events */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the requesting socket (referenced) in sk */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
5464 
5465 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
5466 			    u8 link_type, u8 addr_type, u8 status)
5467 {
5468 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
5469 	struct mgmt_cp_disconnect *cp;
5470 	struct mgmt_rp_disconnect rp;
5471 	struct pending_cmd *cmd;
5472 
5473 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5474 			     hdev);
5475 
5476 	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
5477 	if (!cmd)
5478 		return;
5479 
5480 	cp = cmd->param;
5481 
5482 	if (bacmp(bdaddr, &cp->addr.bdaddr))
5483 		return;
5484 
5485 	if (cp->addr.type != bdaddr_type)
5486 		return;
5487 
5488 	bacpy(&rp.addr.bdaddr, bdaddr);
5489 	rp.addr.type = bdaddr_type;
5490 
5491 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
5492 		     mgmt_status(status), &rp, sizeof(rp));
5493 
5494 	mgmt_pending_remove(cmd);
5495 }
5496 
/* A connection attempt failed: possibly expedite a pending power off
 * (same logic as in mgmt_device_disconnected) and emit a Connect
 * Failed event with the mapped status.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
5522 
5523 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5524 {
5525 	struct mgmt_ev_pin_code_request ev;
5526 
5527 	bacpy(&ev.addr.bdaddr, bdaddr);
5528 	ev.addr.type = BDADDR_BREDR;
5529 	ev.secure = secure;
5530 
5531 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5532 }
5533 
/* Complete a pending PIN Code Reply command with the controller's
 * status for the corresponding HCI reply.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5552 
5553 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5554 				      u8 status)
5555 {
5556 	struct pending_cmd *cmd;
5557 	struct mgmt_rp_pin_code_reply rp;
5558 
5559 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5560 	if (!cmd)
5561 		return;
5562 
5563 	bacpy(&rp.addr.bdaddr, bdaddr);
5564 	rp.addr.type = BDADDR_BREDR;
5565 
5566 	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5567 		     mgmt_status(status), &rp, sizeof(rp));
5568 
5569 	mgmt_pending_remove(cmd);
5570 }
5571 
5572 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5573 			      u8 link_type, u8 addr_type, u32 value,
5574 			      u8 confirm_hint)
5575 {
5576 	struct mgmt_ev_user_confirm_request ev;
5577 
5578 	BT_DBG("%s", hdev->name);
5579 
5580 	bacpy(&ev.addr.bdaddr, bdaddr);
5581 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5582 	ev.confirm_hint = confirm_hint;
5583 	ev.value = cpu_to_le32(value);
5584 
5585 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5586 			  NULL);
5587 }
5588 
5589 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5590 			      u8 link_type, u8 addr_type)
5591 {
5592 	struct mgmt_ev_user_passkey_request ev;
5593 
5594 	BT_DBG("%s", hdev->name);
5595 
5596 	bacpy(&ev.addr.bdaddr, bdaddr);
5597 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5598 
5599 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5600 			  NULL);
5601 }
5602 
/* Common completion path for the four user confirm/passkey (neg)
 * reply commands: find the pending mgmt command matching @opcode,
 * send its Command Complete carrying the remote address and the
 * translated HCI status, then drop the pending entry.
 *
 * Returns 0 on success, -ENOENT if no matching command is pending,
 * or a negative error from cmd_complete().
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}
5624 
/* Complete a pending User Confirm Reply command */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
5631 
/* Complete a pending User Confirm Negative Reply command */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
5639 
/* Complete a pending User Passkey Reply command */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
5646 
/* Complete a pending User Passkey Negative Reply command */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
5654 
5655 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5656 			     u8 link_type, u8 addr_type, u32 passkey,
5657 			     u8 entered)
5658 {
5659 	struct mgmt_ev_passkey_notify ev;
5660 
5661 	BT_DBG("%s", hdev->name);
5662 
5663 	bacpy(&ev.addr.bdaddr, bdaddr);
5664 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5665 	ev.passkey = __cpu_to_le32(passkey);
5666 	ev.entered = entered;
5667 
5668 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5669 }
5670 
5671 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5672 		      u8 addr_type, u8 status)
5673 {
5674 	struct mgmt_ev_auth_failed ev;
5675 
5676 	bacpy(&ev.addr.bdaddr, bdaddr);
5677 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5678 	ev.status = mgmt_status(status);
5679 
5680 	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5681 }
5682 
/* Called when the link security (authentication) setting change
 * completes in the controller.  On failure, all pending Set Link
 * Security commands receive the translated error status.  On
 * success, HCI_LINK_SECURITY is synced with the controller's
 * HCI_AUTH flag and New Settings is broadcast if the setting
 * actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller state into the mgmt setting flag and
	 * note whether anything changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Release the socket reference held in match (if any) */
	if (match.sk)
		sock_put(match.sk);
}
5711 
5712 static void clear_eir(struct hci_request *req)
5713 {
5714 	struct hci_dev *hdev = req->hdev;
5715 	struct hci_cp_write_eir cp;
5716 
5717 	if (!lmp_ext_inq_capable(hdev))
5718 		return;
5719 
5720 	memset(hdev->eir, 0, sizeof(hdev->eir));
5721 
5722 	memset(&cp, 0, sizeof(cp));
5723 
5724 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
5725 }
5726 
/* Called when the SSP setting change completes in the controller.
 * Syncs HCI_SSP_ENABLED (and the dependent HCI_HS_ENABLED) flags
 * with the result, answers all pending Set SSP commands and, on
 * success, rewrites or clears the EIR data to match the new SSP
 * state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* If enabling failed, make sure the SSP flag does not
		 * stay set, and drop High Speed with it.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP also disables High Speed; report a
		 * change if either flag was previously set.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Release the socket reference held in match (if any) */
	if (match.sk)
		sock_put(match.sk);

	/* Keep the EIR data in sync with the final SSP state */
	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}
5775 
/* Called when the Secure Connections setting change completes in
 * the controller.  Syncs the HCI_SC_ENABLED/HCI_SC_ONLY flags with
 * the result and answers all pending Set Secure Connections
 * commands.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* If enabling failed, make sure neither SC flag stays
		 * set.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SC also drops the SC-only mode */
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Release the socket reference held in match (if any) */
	if (match.sk)
		sock_put(match.sk);
}
5812 
5813 static void sk_lookup(struct pending_cmd *cmd, void *data)
5814 {
5815 	struct cmd_lookup *match = data;
5816 
5817 	if (match->sk == NULL) {
5818 		match->sk = cmd->sk;
5819 		sock_hold(match->sk);
5820 	}
5821 }
5822 
/* Called when a class of device update finishes.  Picks up the
 * socket of whichever pending command triggered the update (Set Dev
 * Class, Add UUID or Remove UUID) and, on success, broadcasts the
 * new class to everyone else.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	/* The class of device value is always 3 bytes */
	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	/* Release the socket reference held in match (if any) */
	if (match.sk)
		sock_put(match.sk);
}
5839 
/* Called when a local name update finishes.  Broadcasts Local Name
 * Changed, except while powering on the adapter, where no signal
 * should be sent.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* The change did not come through mgmt, so store the
		 * name locally.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the initiating socket; it gets the command response */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
5866 
/* Deliver local out-of-band pairing data to the pending Read Local
 * OOB Data command.  When Secure Connections is enabled and 256-bit
 * values are available, the extended reply carrying both the 192-
 * and 256-bit hash/randomizer pairs is used; otherwise only the
 * 192-bit values are returned.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
5913 
/* Send a Device Found event for a device seen during discovery.
 *
 * If the seen address resolves through a stored IRK, the identity
 * address is reported instead of the on-air address.  Class of
 * device, EIR and scan response data are merged into a single EIR
 * blob inside the event.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
		       u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
		       u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Results are only reported while discovery is active */
	if (!hci_discovery_active(hdev))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* Report the identity address if we know the IRK for it */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device unless the EIR already has one */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
5965 
/* Report a resolved remote device name to user space as a Device
 * Found event carrying a complete-name EIR field.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	/* Event header plus a name EIR field: 2 byte EIR header and
	 * up to HCI_MAX_NAME_LENGTH bytes of name data.
	 */
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
5988 
/* Report a discovery state change: complete any pending Start/Stop
 * Discovery command with the discovery type, then broadcast the
 * Discovering event.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	/* The pending command depends on which transition happened */
	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
6015 
6016 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6017 {
6018 	struct pending_cmd *cmd;
6019 	struct mgmt_ev_device_blocked ev;
6020 
6021 	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
6022 
6023 	bacpy(&ev.addr.bdaddr, bdaddr);
6024 	ev.addr.type = type;
6025 
6026 	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
6027 			  cmd ? cmd->sk : NULL);
6028 }
6029 
6030 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6031 {
6032 	struct pending_cmd *cmd;
6033 	struct mgmt_ev_device_unblocked ev;
6034 
6035 	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
6036 
6037 	bacpy(&ev.addr.bdaddr, bdaddr);
6038 	ev.addr.type = type;
6039 
6040 	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
6041 			  cmd ? cmd->sk : NULL);
6042 }
6043 
6044 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6045 {
6046 	BT_DBG("%s status %u", hdev->name, status);
6047 
6048 	/* Clear the advertising mgmt setting if we failed to re-enable it */
6049 	if (status) {
6050 		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6051 		new_settings(hdev, NULL);
6052 	}
6053 }
6054 
/* Try to re-enable advertising once there are no LE connections
 * left, provided the advertising mgmt setting is still on.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	/* Don't re-enable while LE connections still exist */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}
6076