xref: /openbmc/linux/net/bluetooth/mgmt.c (revision bc000245)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 
34 #include "smp.h"
35 
36 #define MGMT_VERSION	1
37 #define MGMT_REVISION	4
38 
39 static const u16 mgmt_commands[] = {
40 	MGMT_OP_READ_INDEX_LIST,
41 	MGMT_OP_READ_INFO,
42 	MGMT_OP_SET_POWERED,
43 	MGMT_OP_SET_DISCOVERABLE,
44 	MGMT_OP_SET_CONNECTABLE,
45 	MGMT_OP_SET_FAST_CONNECTABLE,
46 	MGMT_OP_SET_PAIRABLE,
47 	MGMT_OP_SET_LINK_SECURITY,
48 	MGMT_OP_SET_SSP,
49 	MGMT_OP_SET_HS,
50 	MGMT_OP_SET_LE,
51 	MGMT_OP_SET_DEV_CLASS,
52 	MGMT_OP_SET_LOCAL_NAME,
53 	MGMT_OP_ADD_UUID,
54 	MGMT_OP_REMOVE_UUID,
55 	MGMT_OP_LOAD_LINK_KEYS,
56 	MGMT_OP_LOAD_LONG_TERM_KEYS,
57 	MGMT_OP_DISCONNECT,
58 	MGMT_OP_GET_CONNECTIONS,
59 	MGMT_OP_PIN_CODE_REPLY,
60 	MGMT_OP_PIN_CODE_NEG_REPLY,
61 	MGMT_OP_SET_IO_CAPABILITY,
62 	MGMT_OP_PAIR_DEVICE,
63 	MGMT_OP_CANCEL_PAIR_DEVICE,
64 	MGMT_OP_UNPAIR_DEVICE,
65 	MGMT_OP_USER_CONFIRM_REPLY,
66 	MGMT_OP_USER_CONFIRM_NEG_REPLY,
67 	MGMT_OP_USER_PASSKEY_REPLY,
68 	MGMT_OP_USER_PASSKEY_NEG_REPLY,
69 	MGMT_OP_READ_LOCAL_OOB_DATA,
70 	MGMT_OP_ADD_REMOTE_OOB_DATA,
71 	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
72 	MGMT_OP_START_DISCOVERY,
73 	MGMT_OP_STOP_DISCOVERY,
74 	MGMT_OP_CONFIRM_NAME,
75 	MGMT_OP_BLOCK_DEVICE,
76 	MGMT_OP_UNBLOCK_DEVICE,
77 	MGMT_OP_SET_DEVICE_ID,
78 	MGMT_OP_SET_ADVERTISING,
79 	MGMT_OP_SET_BREDR,
80 	MGMT_OP_SET_STATIC_ADDRESS,
81 	MGMT_OP_SET_SCAN_PARAMS,
82 };
83 
84 static const u16 mgmt_events[] = {
85 	MGMT_EV_CONTROLLER_ERROR,
86 	MGMT_EV_INDEX_ADDED,
87 	MGMT_EV_INDEX_REMOVED,
88 	MGMT_EV_NEW_SETTINGS,
89 	MGMT_EV_CLASS_OF_DEV_CHANGED,
90 	MGMT_EV_LOCAL_NAME_CHANGED,
91 	MGMT_EV_NEW_LINK_KEY,
92 	MGMT_EV_NEW_LONG_TERM_KEY,
93 	MGMT_EV_DEVICE_CONNECTED,
94 	MGMT_EV_DEVICE_DISCONNECTED,
95 	MGMT_EV_CONNECT_FAILED,
96 	MGMT_EV_PIN_CODE_REQUEST,
97 	MGMT_EV_USER_CONFIRM_REQUEST,
98 	MGMT_EV_USER_PASSKEY_REQUEST,
99 	MGMT_EV_AUTH_FAILED,
100 	MGMT_EV_DEVICE_FOUND,
101 	MGMT_EV_DISCOVERING,
102 	MGMT_EV_DEVICE_BLOCKED,
103 	MGMT_EV_DEVICE_UNBLOCKED,
104 	MGMT_EV_DEVICE_UNPAIRED,
105 	MGMT_EV_PASSKEY_NOTIFY,
106 };
107 
108 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
109 
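/* From the mgmt point of view a controller only counts as powered once
 * HCI_UP is set and the HCI_AUTO_OFF grace period (used right after
 * controller registration) has been cleared.
 */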
110 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
111 				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
112 
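/* One pending_cmd is allocated per mgmt command that is still waiting
 * for its HCI request to finish. Entries live on hdev->mgmt_pending,
 * keep a copy of the command parameters in param and hold a reference
 * on the originating socket until the reply has been sent.
 */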
113 struct pending_cmd {
114 	struct list_head list;
115 	u16 opcode;
116 	int index;
117 	void *param;
118 	struct sock *sk;
119 	void *user_data;
120 };
121 
122 /* HCI to MGMT error code conversion table */
123 static u8 mgmt_status_table[] = {
124 	MGMT_STATUS_SUCCESS,
125 	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
126 	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
127 	MGMT_STATUS_FAILED,		/* Hardware Failure */
128 	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
129 	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
130 	MGMT_STATUS_NOT_PAIRED,		/* PIN or Key Missing */
131 	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
132 	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
133 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
134 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
135 	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
136 	MGMT_STATUS_BUSY,		/* Command Disallowed */
137 	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
138 	MGMT_STATUS_REJECTED,		/* Rejected Security */
139 	MGMT_STATUS_REJECTED,		/* Rejected Personal */
140 	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
141 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
142 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
143 	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
144 	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
145 	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
146 	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
147 	MGMT_STATUS_BUSY,		/* Repeated Attempts */
148 	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
149 	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
150 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
151 	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
152 	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
153 	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
154 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
155 	MGMT_STATUS_FAILED,		/* Unspecified Error */
156 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
157 	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
158 	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
159 	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
160 	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
161 	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
162 	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
163 	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
164 	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
165 	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
166 	MGMT_STATUS_FAILED,		/* Transaction Collision */
167 	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
168 	MGMT_STATUS_REJECTED,		/* QoS Rejected */
169 	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
170 	MGMT_STATUS_REJECTED,		/* Insufficient Security */
171 	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
172 	MGMT_STATUS_BUSY,		/* Role Switch Pending */
173 	MGMT_STATUS_FAILED,		/* Slot Violation */
174 	MGMT_STATUS_FAILED,		/* Role Switch Failed */
175 	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
176 	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
177 	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
178 	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
179 	MGMT_STATUS_BUSY,		/* Controller Busy */
180 	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
181 	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
182 	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
183 	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
184 	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
185 };
186 
187 static u8 mgmt_status(u8 hci_status)
188 {
189 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
190 		return mgmt_status_table[hci_status];
191 
192 	return MGMT_STATUS_FAILED;
193 }
194 
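/* cmd_status() and cmd_complete() build the reply frames for a single
 * mgmt command: a struct mgmt_hdr (little-endian opcode, controller
 * index and payload length) followed by the Command Status or Command
 * Complete event. The reply is queued directly on the requesting
 * socket rather than broadcast to all mgmt sockets.
 */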
195 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
196 {
197 	struct sk_buff *skb;
198 	struct mgmt_hdr *hdr;
199 	struct mgmt_ev_cmd_status *ev;
200 	int err;
201 
202 	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
203 
204 	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
205 	if (!skb)
206 		return -ENOMEM;
207 
208 	hdr = (void *) skb_put(skb, sizeof(*hdr));
209 
210 	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
211 	hdr->index = cpu_to_le16(index);
212 	hdr->len = cpu_to_le16(sizeof(*ev));
213 
214 	ev = (void *) skb_put(skb, sizeof(*ev));
215 	ev->status = status;
216 	ev->opcode = cpu_to_le16(cmd);
217 
218 	err = sock_queue_rcv_skb(sk, skb);
219 	if (err < 0)
220 		kfree_skb(skb);
221 
222 	return err;
223 }
224 
225 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
226 			void *rp, size_t rp_len)
227 {
228 	struct sk_buff *skb;
229 	struct mgmt_hdr *hdr;
230 	struct mgmt_ev_cmd_complete *ev;
231 	int err;
232 
233 	BT_DBG("sock %p", sk);
234 
235 	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
236 	if (!skb)
237 		return -ENOMEM;
238 
239 	hdr = (void *) skb_put(skb, sizeof(*hdr));
240 
241 	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
242 	hdr->index = cpu_to_le16(index);
243 	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
244 
245 	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
246 	ev->opcode = cpu_to_le16(cmd);
247 	ev->status = status;
248 
249 	if (rp)
250 		memcpy(ev->data, rp, rp_len);
251 
252 	err = sock_queue_rcv_skb(sk, skb);
253 	if (err < 0)
254 		kfree_skb(skb);
255 
256 	return err;
257 }
258 
259 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
260 			u16 data_len)
261 {
262 	struct mgmt_rp_read_version rp;
263 
264 	BT_DBG("sock %p", sk);
265 
266 	rp.version = MGMT_VERSION;
267 	rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
268 
269 	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
270 			    sizeof(rp));
271 }
272 
273 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
274 			 u16 data_len)
275 {
276 	struct mgmt_rp_read_commands *rp;
277 	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
278 	const u16 num_events = ARRAY_SIZE(mgmt_events);
279 	__le16 *opcode;
280 	size_t rp_size;
281 	int i, err;
282 
283 	BT_DBG("sock %p", sk);
284 
285 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
286 
287 	rp = kmalloc(rp_size, GFP_KERNEL);
288 	if (!rp)
289 		return -ENOMEM;
290 
291 	rp->num_commands = __constant_cpu_to_le16(num_commands);
292 	rp->num_events = __constant_cpu_to_le16(num_events);
293 
294 	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
295 		put_unaligned_le16(mgmt_commands[i], opcode);
296 
297 	for (i = 0; i < num_events; i++, opcode++)
298 		put_unaligned_le16(mgmt_events[i], opcode);
299 
300 	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
301 			   rp_size);
302 	kfree(rp);
303 
304 	return err;
305 }
306 
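/* Read Index List only reports BR/EDR controllers that have finished
 * their setup stage (HCI_SETUP cleared) and are not bound to a user
 * channel. The reply buffer is sized for the worst case count and
 * num_controllers/rp_len are trimmed after the second, filtering pass.
 */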
307 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
308 			   u16 data_len)
309 {
310 	struct mgmt_rp_read_index_list *rp;
311 	struct hci_dev *d;
312 	size_t rp_len;
313 	u16 count;
314 	int err;
315 
316 	BT_DBG("sock %p", sk);
317 
318 	read_lock(&hci_dev_list_lock);
319 
320 	count = 0;
321 	list_for_each_entry(d, &hci_dev_list, list) {
322 		if (d->dev_type == HCI_BREDR)
323 			count++;
324 	}
325 
326 	rp_len = sizeof(*rp) + (2 * count);
327 	rp = kmalloc(rp_len, GFP_ATOMIC);
328 	if (!rp) {
329 		read_unlock(&hci_dev_list_lock);
330 		return -ENOMEM;
331 	}
332 
333 	count = 0;
334 	list_for_each_entry(d, &hci_dev_list, list) {
335 		if (test_bit(HCI_SETUP, &d->dev_flags))
336 			continue;
337 
338 		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
339 			continue;
340 
341 		if (d->dev_type == HCI_BREDR) {
342 			rp->index[count++] = cpu_to_le16(d->id);
343 			BT_DBG("Added hci%u", d->id);
344 		}
345 	}
346 
347 	rp->num_controllers = cpu_to_le16(count);
348 	rp_len = sizeof(*rp) + (2 * count);
349 
350 	read_unlock(&hci_dev_list_lock);
351 
352 	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
353 			   rp_len);
354 
355 	kfree(rp);
356 
357 	return err;
358 }
359 
360 static u32 get_supported_settings(struct hci_dev *hdev)
361 {
362 	u32 settings = 0;
363 
364 	settings |= MGMT_SETTING_POWERED;
365 	settings |= MGMT_SETTING_PAIRABLE;
366 
367 	if (lmp_bredr_capable(hdev)) {
368 		settings |= MGMT_SETTING_CONNECTABLE;
369 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
370 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
371 		settings |= MGMT_SETTING_DISCOVERABLE;
372 		settings |= MGMT_SETTING_BREDR;
373 		settings |= MGMT_SETTING_LINK_SECURITY;
374 
375 		if (lmp_ssp_capable(hdev)) {
376 			settings |= MGMT_SETTING_SSP;
377 			settings |= MGMT_SETTING_HS;
378 		}
379 	}
380 
381 	if (lmp_le_capable(hdev)) {
382 		settings |= MGMT_SETTING_LE;
383 		settings |= MGMT_SETTING_ADVERTISING;
384 	}
385 
386 	return settings;
387 }
388 
389 static u32 get_current_settings(struct hci_dev *hdev)
390 {
391 	u32 settings = 0;
392 
393 	if (hdev_is_powered(hdev))
394 		settings |= MGMT_SETTING_POWERED;
395 
396 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
397 		settings |= MGMT_SETTING_CONNECTABLE;
398 
399 	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
400 		settings |= MGMT_SETTING_FAST_CONNECTABLE;
401 
402 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
403 		settings |= MGMT_SETTING_DISCOVERABLE;
404 
405 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
406 		settings |= MGMT_SETTING_PAIRABLE;
407 
408 	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
409 		settings |= MGMT_SETTING_BREDR;
410 
411 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
412 		settings |= MGMT_SETTING_LE;
413 
414 	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
415 		settings |= MGMT_SETTING_LINK_SECURITY;
416 
417 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
418 		settings |= MGMT_SETTING_SSP;
419 
420 	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
421 		settings |= MGMT_SETTING_HS;
422 
423 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
424 		settings |= MGMT_SETTING_ADVERTISING;
425 
426 	return settings;
427 }
428 
429 #define PNP_INFO_SVCLASS_ID		0x1200
430 
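/* The create_uuid*_list() helpers below emit EIR structures of the
 * form [length][EIR type][data...]. A 16-bit UUID sits in bytes 12-13
 * of its 128-bit little-endian representation, which is where
 * get_unaligned_le16() picks it up. The PnP Information service class
 * (0x1200) is skipped since the Device ID is advertised through the
 * dedicated EIR_DEVICE_ID structure in create_eir(). When the
 * remaining buffer cannot hold another UUID the list type is
 * downgraded from *_ALL to *_SOME (an incomplete list).
 */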
431 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
432 {
433 	u8 *ptr = data, *uuids_start = NULL;
434 	struct bt_uuid *uuid;
435 
436 	if (len < 4)
437 		return ptr;
438 
439 	list_for_each_entry(uuid, &hdev->uuids, list) {
440 		u16 uuid16;
441 
442 		if (uuid->size != 16)
443 			continue;
444 
445 		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
446 		if (uuid16 < 0x1100)
447 			continue;
448 
449 		if (uuid16 == PNP_INFO_SVCLASS_ID)
450 			continue;
451 
452 		if (!uuids_start) {
453 			uuids_start = ptr;
454 			uuids_start[0] = 1;
455 			uuids_start[1] = EIR_UUID16_ALL;
456 			ptr += 2;
457 		}
458 
459 		/* Stop if not enough space to put next UUID */
460 		if ((ptr - data) + sizeof(u16) > len) {
461 			uuids_start[1] = EIR_UUID16_SOME;
462 			break;
463 		}
464 
465 		*ptr++ = (uuid16 & 0x00ff);
466 		*ptr++ = (uuid16 & 0xff00) >> 8;
467 		uuids_start[0] += sizeof(uuid16);
468 	}
469 
470 	return ptr;
471 }
472 
473 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
474 {
475 	u8 *ptr = data, *uuids_start = NULL;
476 	struct bt_uuid *uuid;
477 
478 	if (len < 6)
479 		return ptr;
480 
481 	list_for_each_entry(uuid, &hdev->uuids, list) {
482 		if (uuid->size != 32)
483 			continue;
484 
485 		if (!uuids_start) {
486 			uuids_start = ptr;
487 			uuids_start[0] = 1;
488 			uuids_start[1] = EIR_UUID32_ALL;
489 			ptr += 2;
490 		}
491 
492 		/* Stop if not enough space to put next UUID */
493 		if ((ptr - data) + sizeof(u32) > len) {
494 			uuids_start[1] = EIR_UUID32_SOME;
495 			break;
496 		}
497 
498 		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
499 		ptr += sizeof(u32);
500 		uuids_start[0] += sizeof(u32);
501 	}
502 
503 	return ptr;
504 }
505 
506 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
507 {
508 	u8 *ptr = data, *uuids_start = NULL;
509 	struct bt_uuid *uuid;
510 
511 	if (len < 18)
512 		return ptr;
513 
514 	list_for_each_entry(uuid, &hdev->uuids, list) {
515 		if (uuid->size != 128)
516 			continue;
517 
518 		if (!uuids_start) {
519 			uuids_start = ptr;
520 			uuids_start[0] = 1;
521 			uuids_start[1] = EIR_UUID128_ALL;
522 			ptr += 2;
523 		}
524 
525 		/* Stop if not enough space to put next UUID */
526 		if ((ptr - data) + 16 > len) {
527 			uuids_start[1] = EIR_UUID128_SOME;
528 			break;
529 		}
530 
531 		memcpy(ptr, uuid->uuid, 16);
532 		ptr += 16;
533 		uuids_start[0] += 16;
534 	}
535 
536 	return ptr;
537 }
538 
539 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
540 {
541 	struct pending_cmd *cmd;
542 
543 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
544 		if (cmd->opcode == opcode)
545 			return cmd;
546 	}
547 
548 	return NULL;
549 }
550 
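/* The scan response data currently only carries the local name, as
 * EIR_NAME_COMPLETE or truncated to EIR_NAME_SHORT when it does not
 * fit into HCI_MAX_AD_LENGTH. update_scan_rsp_data() only queues an
 * HCI_OP_LE_SET_SCAN_RSP_DATA command when the generated data differs
 * from what the controller already has.
 */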
551 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
552 {
553 	u8 ad_len = 0;
554 	size_t name_len;
555 
556 	name_len = strlen(hdev->dev_name);
557 	if (name_len > 0) {
558 		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
559 
560 		if (name_len > max_len) {
561 			name_len = max_len;
562 			ptr[1] = EIR_NAME_SHORT;
563 		} else
564 			ptr[1] = EIR_NAME_COMPLETE;
565 
566 		ptr[0] = name_len + 1;
567 
568 		memcpy(ptr + 2, hdev->dev_name, name_len);
569 
570 		ad_len += (name_len + 2);
571 		ptr += (name_len + 2);
572 	}
573 
574 	return ad_len;
575 }
576 
577 static void update_scan_rsp_data(struct hci_request *req)
578 {
579 	struct hci_dev *hdev = req->hdev;
580 	struct hci_cp_le_set_scan_rsp_data cp;
581 	u8 len;
582 
583 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
584 		return;
585 
586 	memset(&cp, 0, sizeof(cp));
587 
588 	len = create_scan_rsp_data(hdev, cp.data);
589 
590 	if (hdev->scan_rsp_data_len == len &&
591 	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
592 		return;
593 
594 	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
595 	hdev->scan_rsp_data_len = len;
596 
597 	cp.length = len;
598 
599 	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
600 }
601 
602 static u8 get_adv_discov_flags(struct hci_dev *hdev)
603 {
604 	struct pending_cmd *cmd;
605 
606 	/* If there's a pending mgmt command the flags will not yet have
607 	 * their final values, so check for this first.
608 	 */
609 	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
610 	if (cmd) {
611 		struct mgmt_mode *cp = cmd->param;
612 		if (cp->val == 0x01)
613 			return LE_AD_GENERAL;
614 		else if (cp->val == 0x02)
615 			return LE_AD_LIMITED;
616 	} else {
617 		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
618 			return LE_AD_LIMITED;
619 		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
620 			return LE_AD_GENERAL;
621 	}
622 
623 	return 0;
624 }
625 
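/* The advertising data consists of the Flags field (discoverable mode
 * plus either the simultaneous LE/BR-EDR bits or LE_AD_NO_BREDR for an
 * LE-only configuration) and, when known, the advertising TX power.
 * Each field again uses the [length][type][value] EIR/AD format.
 */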
626 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
627 {
628 	u8 ad_len = 0, flags = 0;
629 
630 	flags |= get_adv_discov_flags(hdev);
631 
632 	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
633 		if (lmp_le_br_capable(hdev))
634 			flags |= LE_AD_SIM_LE_BREDR_CTRL;
635 		if (lmp_host_le_br_capable(hdev))
636 			flags |= LE_AD_SIM_LE_BREDR_HOST;
637 	} else {
638 		flags |= LE_AD_NO_BREDR;
639 	}
640 
641 	if (flags) {
642 		BT_DBG("adv flags 0x%02x", flags);
643 
644 		ptr[0] = 2;
645 		ptr[1] = EIR_FLAGS;
646 		ptr[2] = flags;
647 
648 		ad_len += 3;
649 		ptr += 3;
650 	}
651 
652 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
653 		ptr[0] = 2;
654 		ptr[1] = EIR_TX_POWER;
655 		ptr[2] = (u8) hdev->adv_tx_power;
656 
657 		ad_len += 3;
658 		ptr += 3;
659 	}
660 
661 	return ad_len;
662 }
663 
664 static void update_adv_data(struct hci_request *req)
665 {
666 	struct hci_dev *hdev = req->hdev;
667 	struct hci_cp_le_set_adv_data cp;
668 	u8 len;
669 
670 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
671 		return;
672 
673 	memset(&cp, 0, sizeof(cp));
674 
675 	len = create_adv_data(hdev, cp.data);
676 
677 	if (hdev->adv_data_len == len &&
678 	    memcmp(cp.data, hdev->adv_data, len) == 0)
679 		return;
680 
681 	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
682 	hdev->adv_data_len = len;
683 
684 	cp.length = len;
685 
686 	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
687 }
688 
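/* The extended inquiry response is assembled as: local name (complete
 * or shortened), inquiry TX power, the Device ID record (source,
 * vendor, product and version as four little-endian 16-bit values in
 * an EIR_DEVICE_ID structure of length 9, i.e. type plus 8 data
 * bytes), and finally the 16/32/128-bit UUID lists, all limited to
 * HCI_MAX_EIR_LENGTH.
 */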
689 static void create_eir(struct hci_dev *hdev, u8 *data)
690 {
691 	u8 *ptr = data;
692 	size_t name_len;
693 
694 	name_len = strlen(hdev->dev_name);
695 
696 	if (name_len > 0) {
697 		/* EIR Data type */
698 		if (name_len > 48) {
699 			name_len = 48;
700 			ptr[1] = EIR_NAME_SHORT;
701 		} else
702 			ptr[1] = EIR_NAME_COMPLETE;
703 
704 		/* EIR Data length */
705 		ptr[0] = name_len + 1;
706 
707 		memcpy(ptr + 2, hdev->dev_name, name_len);
708 
709 		ptr += (name_len + 2);
710 	}
711 
712 	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
713 		ptr[0] = 2;
714 		ptr[1] = EIR_TX_POWER;
715 		ptr[2] = (u8) hdev->inq_tx_power;
716 
717 		ptr += 3;
718 	}
719 
720 	if (hdev->devid_source > 0) {
721 		ptr[0] = 9;
722 		ptr[1] = EIR_DEVICE_ID;
723 
724 		put_unaligned_le16(hdev->devid_source, ptr + 2);
725 		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
726 		put_unaligned_le16(hdev->devid_product, ptr + 6);
727 		put_unaligned_le16(hdev->devid_version, ptr + 8);
728 
729 		ptr += 10;
730 	}
731 
732 	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
733 	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
734 	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
735 }
736 
737 static void update_eir(struct hci_request *req)
738 {
739 	struct hci_dev *hdev = req->hdev;
740 	struct hci_cp_write_eir cp;
741 
742 	if (!hdev_is_powered(hdev))
743 		return;
744 
745 	if (!lmp_ext_inq_capable(hdev))
746 		return;
747 
748 	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
749 		return;
750 
751 	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
752 		return;
753 
754 	memset(&cp, 0, sizeof(cp));
755 
756 	create_eir(hdev, cp.data);
757 
758 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
759 		return;
760 
761 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
762 
763 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
764 }
765 
766 static u8 get_service_classes(struct hci_dev *hdev)
767 {
768 	struct bt_uuid *uuid;
769 	u8 val = 0;
770 
771 	list_for_each_entry(uuid, &hdev->uuids, list)
772 		val |= uuid->svc_hint;
773 
774 	return val;
775 }
776 
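/* The class of device is written as three bytes: minor class, major
 * class and the major service class bits OR'ed together from the
 * svc_hint of every registered UUID. When limited discoverable mode is
 * active, bit 5 of the middle byte (bit 13 of the 24-bit CoD, the
 * "Limited Discoverable Mode" bit) is set on top of the stored major
 * class.
 */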
777 static void update_class(struct hci_request *req)
778 {
779 	struct hci_dev *hdev = req->hdev;
780 	u8 cod[3];
781 
782 	BT_DBG("%s", hdev->name);
783 
784 	if (!hdev_is_powered(hdev))
785 		return;
786 
787 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
788 		return;
789 
790 	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
791 		return;
792 
793 	cod[0] = hdev->minor_class;
794 	cod[1] = hdev->major_class;
795 	cod[2] = get_service_classes(hdev);
796 
797 	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
798 		cod[1] |= 0x20;
799 
800 	if (memcmp(cod, hdev->dev_class, 3) == 0)
801 		return;
802 
803 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
804 }
805 
806 static void service_cache_off(struct work_struct *work)
807 {
808 	struct hci_dev *hdev = container_of(work, struct hci_dev,
809 					    service_cache.work);
810 	struct hci_request req;
811 
812 	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
813 		return;
814 
815 	hci_req_init(&req, hdev);
816 
817 	hci_dev_lock(hdev);
818 
819 	update_eir(&req);
820 	update_class(&req);
821 
822 	hci_dev_unlock(hdev);
823 
824 	hci_req_run(&req, NULL);
825 }
826 
827 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
828 {
829 	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
830 		return;
831 
832 	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
833 
834 	/* Non-mgmt controlled devices get this bit set
835 	 * implicitly so that pairing works for them. For mgmt,
836 	 * however, we require user-space to explicitly enable
837 	 * it.
838 	 */
839 	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
840 }
841 
842 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
843 				void *data, u16 data_len)
844 {
845 	struct mgmt_rp_read_info rp;
846 
847 	BT_DBG("sock %p %s", sk, hdev->name);
848 
849 	hci_dev_lock(hdev);
850 
851 	memset(&rp, 0, sizeof(rp));
852 
853 	bacpy(&rp.bdaddr, &hdev->bdaddr);
854 
855 	rp.version = hdev->hci_ver;
856 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
857 
858 	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
859 	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
860 
861 	memcpy(rp.dev_class, hdev->dev_class, 3);
862 
863 	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
864 	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
865 
866 	hci_dev_unlock(hdev);
867 
868 	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
869 			    sizeof(rp));
870 }
871 
872 static void mgmt_pending_free(struct pending_cmd *cmd)
873 {
874 	sock_put(cmd->sk);
875 	kfree(cmd->param);
876 	kfree(cmd);
877 }
878 
879 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
880 					    struct hci_dev *hdev, void *data,
881 					    u16 len)
882 {
883 	struct pending_cmd *cmd;
884 
885 	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
886 	if (!cmd)
887 		return NULL;
888 
889 	cmd->opcode = opcode;
890 	cmd->index = hdev->id;
891 
892 	cmd->param = kmalloc(len, GFP_KERNEL);
893 	if (!cmd->param) {
894 		kfree(cmd);
895 		return NULL;
896 	}
897 
898 	if (data)
899 		memcpy(cmd->param, data, len);
900 
901 	cmd->sk = sk;
902 	sock_hold(sk);
903 
904 	list_add(&cmd->list, &hdev->mgmt_pending);
905 
906 	return cmd;
907 }
908 
909 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
910 				 void (*cb)(struct pending_cmd *cmd,
911 					    void *data),
912 				 void *data)
913 {
914 	struct pending_cmd *cmd, *tmp;
915 
916 	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
917 		if (opcode > 0 && cmd->opcode != opcode)
918 			continue;
919 
920 		cb(cmd, data);
921 	}
922 }
923 
924 static void mgmt_pending_remove(struct pending_cmd *cmd)
925 {
926 	list_del(&cmd->list);
927 	mgmt_pending_free(cmd);
928 }
929 
930 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
931 {
932 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
933 
934 	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
935 			    sizeof(settings));
936 }
937 
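/* Set Powered validates the mode (0x00/0x01), handles the special case
 * where the controller is still in its HCI_AUTO_OFF grace period,
 * short-circuits when the requested state already matches and
 * otherwise queues power_on/power_off work. The final reply is
 * deferred until the power change is reported back via mgmt_powered().
 */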
938 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
939 		       u16 len)
940 {
941 	struct mgmt_mode *cp = data;
942 	struct pending_cmd *cmd;
943 	int err;
944 
945 	BT_DBG("request for %s", hdev->name);
946 
947 	if (cp->val != 0x00 && cp->val != 0x01)
948 		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
949 				  MGMT_STATUS_INVALID_PARAMS);
950 
951 	hci_dev_lock(hdev);
952 
953 	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
954 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
955 				 MGMT_STATUS_BUSY);
956 		goto failed;
957 	}
958 
959 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
960 		cancel_delayed_work(&hdev->power_off);
961 
962 		if (cp->val) {
963 			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
964 					 data, len);
965 			err = mgmt_powered(hdev, 1);
966 			goto failed;
967 		}
968 	}
969 
970 	if (!!cp->val == hdev_is_powered(hdev)) {
971 		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
972 		goto failed;
973 	}
974 
975 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
976 	if (!cmd) {
977 		err = -ENOMEM;
978 		goto failed;
979 	}
980 
981 	if (cp->val)
982 		queue_work(hdev->req_workqueue, &hdev->power_on);
983 	else
984 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
985 
986 	err = 0;
987 
988 failed:
989 	hci_dev_unlock(hdev);
990 	return err;
991 }
992 
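/* mgmt_event() broadcasts an unsolicited event to every mgmt socket
 * (optionally skipping the socket that triggered it) using the same
 * mgmt_hdr framing as command replies, with the event's timestamp
 * attached to the skb.
 */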
993 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
994 		      struct sock *skip_sk)
995 {
996 	struct sk_buff *skb;
997 	struct mgmt_hdr *hdr;
998 
999 	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1000 	if (!skb)
1001 		return -ENOMEM;
1002 
1003 	hdr = (void *) skb_put(skb, sizeof(*hdr));
1004 	hdr->opcode = cpu_to_le16(event);
1005 	if (hdev)
1006 		hdr->index = cpu_to_le16(hdev->id);
1007 	else
1008 		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
1009 	hdr->len = cpu_to_le16(data_len);
1010 
1011 	if (data)
1012 		memcpy(skb_put(skb, data_len), data, data_len);
1013 
1014 	/* Time stamp */
1015 	__net_timestamp(skb);
1016 
1017 	hci_send_to_control(skb, skip_sk);
1018 	kfree_skb(skb);
1019 
1020 	return 0;
1021 }
1022 
1023 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1024 {
1025 	__le32 ev;
1026 
1027 	ev = cpu_to_le32(get_current_settings(hdev));
1028 
1029 	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1030 }
1031 
1032 struct cmd_lookup {
1033 	struct sock *sk;
1034 	struct hci_dev *hdev;
1035 	u8 mgmt_status;
1036 };
1037 
1038 static void settings_rsp(struct pending_cmd *cmd, void *data)
1039 {
1040 	struct cmd_lookup *match = data;
1041 
1042 	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1043 
1044 	list_del(&cmd->list);
1045 
1046 	if (match->sk == NULL) {
1047 		match->sk = cmd->sk;
1048 		sock_hold(match->sk);
1049 	}
1050 
1051 	mgmt_pending_free(cmd);
1052 }
1053 
1054 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1055 {
1056 	u8 *status = data;
1057 
1058 	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1059 	mgmt_pending_remove(cmd);
1060 }
1061 
1062 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1063 {
1064 	if (!lmp_bredr_capable(hdev))
1065 		return MGMT_STATUS_NOT_SUPPORTED;
1066 	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1067 		return MGMT_STATUS_REJECTED;
1068 	else
1069 		return MGMT_STATUS_SUCCESS;
1070 }
1071 
1072 static u8 mgmt_le_support(struct hci_dev *hdev)
1073 {
1074 	if (!lmp_le_capable(hdev))
1075 		return MGMT_STATUS_NOT_SUPPORTED;
1076 	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1077 		return MGMT_STATUS_REJECTED;
1078 	else
1079 		return MGMT_STATUS_SUCCESS;
1080 }
1081 
1082 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1083 {
1084 	struct pending_cmd *cmd;
1085 	struct mgmt_mode *cp;
1086 	struct hci_request req;
1087 	bool changed;
1088 
1089 	BT_DBG("status 0x%02x", status);
1090 
1091 	hci_dev_lock(hdev);
1092 
1093 	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1094 	if (!cmd)
1095 		goto unlock;
1096 
1097 	if (status) {
1098 		u8 mgmt_err = mgmt_status(status);
1099 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1100 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1101 		goto remove_cmd;
1102 	}
1103 
1104 	cp = cmd->param;
1105 	if (cp->val) {
1106 		changed = !test_and_set_bit(HCI_DISCOVERABLE,
1107 					    &hdev->dev_flags);
1108 
1109 		if (hdev->discov_timeout > 0) {
1110 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1111 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1112 					   to);
1113 		}
1114 	} else {
1115 		changed = test_and_clear_bit(HCI_DISCOVERABLE,
1116 					     &hdev->dev_flags);
1117 	}
1118 
1119 	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1120 
1121 	if (changed)
1122 		new_settings(hdev, cmd->sk);
1123 
1124 	/* When the discoverable mode gets changed, make sure
1125 	 * that the class of device has the limited discoverable
1126 	 * bit correctly set.
1127 	 */
1128 	hci_req_init(&req, hdev);
1129 	update_class(&req);
1130 	hci_req_run(&req, NULL);
1131 
1132 remove_cmd:
1133 	mgmt_pending_remove(cmd);
1134 
1135 unlock:
1136 	hci_dev_unlock(hdev);
1137 }
1138 
1139 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1140 			    u16 len)
1141 {
1142 	struct mgmt_cp_set_discoverable *cp = data;
1143 	struct pending_cmd *cmd;
1144 	struct hci_request req;
1145 	u16 timeout;
1146 	u8 scan;
1147 	int err;
1148 
1149 	BT_DBG("request for %s", hdev->name);
1150 
1151 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1152 	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1153 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1154 				  MGMT_STATUS_REJECTED);
1155 
1156 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1157 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1158 				  MGMT_STATUS_INVALID_PARAMS);
1159 
1160 	timeout = __le16_to_cpu(cp->timeout);
1161 
1162 	/* Disabling discoverable requires that no timeout is set,
1163 	 * and enabling limited discoverable requires a timeout.
1164 	 */
1165 	if ((cp->val == 0x00 && timeout > 0) ||
1166 	    (cp->val == 0x02 && timeout == 0))
1167 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1168 				  MGMT_STATUS_INVALID_PARAMS);
1169 
1170 	hci_dev_lock(hdev);
1171 
1172 	if (!hdev_is_powered(hdev) && timeout > 0) {
1173 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1174 				 MGMT_STATUS_NOT_POWERED);
1175 		goto failed;
1176 	}
1177 
1178 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1179 	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1180 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1181 				 MGMT_STATUS_BUSY);
1182 		goto failed;
1183 	}
1184 
1185 	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1186 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1187 				 MGMT_STATUS_REJECTED);
1188 		goto failed;
1189 	}
1190 
1191 	if (!hdev_is_powered(hdev)) {
1192 		bool changed = false;
1193 
1194 		/* Setting limited discoverable when powered off is
1195 		 * not a valid operation since it requires a timeout,
1196 		 * so there is no need to check HCI_LIMITED_DISCOVERABLE.
1197 		 */
1198 		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1199 			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1200 			changed = true;
1201 		}
1202 
1203 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1204 		if (err < 0)
1205 			goto failed;
1206 
1207 		if (changed)
1208 			err = new_settings(hdev, sk);
1209 
1210 		goto failed;
1211 	}
1212 
1213 	/* If the current mode is the same, then just update the timeout
1214 	 * value with the new one. Since only the timeout gets updated,
1215 	 * no HCI transactions are needed.
1216 	 */
1217 	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1218 	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1219 					  &hdev->dev_flags)) {
1220 		cancel_delayed_work(&hdev->discov_off);
1221 		hdev->discov_timeout = timeout;
1222 
1223 		if (cp->val && hdev->discov_timeout > 0) {
1224 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1225 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1226 					   to);
1227 		}
1228 
1229 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1230 		goto failed;
1231 	}
1232 
1233 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1234 	if (!cmd) {
1235 		err = -ENOMEM;
1236 		goto failed;
1237 	}
1238 
1239 	/* Cancel any discoverable timeout that might still be
1240 	 * active and store the new timeout value. The arming of
1241 	 * the timeout happens in the complete handler.
1242 	 */
1243 	cancel_delayed_work(&hdev->discov_off);
1244 	hdev->discov_timeout = timeout;
1245 
1246 	/* Limited discoverable mode */
1247 	if (cp->val == 0x02)
1248 		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1249 	else
1250 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1251 
1252 	hci_req_init(&req, hdev);
1253 
1254 	/* The procedure for LE-only controllers is much simpler - just
1255 	 * update the advertising data.
1256 	 */
1257 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1258 		goto update_ad;
1259 
1260 	scan = SCAN_PAGE;
1261 
1262 	if (cp->val) {
1263 		struct hci_cp_write_current_iac_lap hci_cp;
1264 
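		/* The inquiry access codes are written LAP LSB first:
		 * 0x9e8b00 is the Limited Inquiry Access Code (LIAC) and
		 * 0x9e8b33 the General Inquiry Access Code (GIAC). Limited
		 * discoverable mode lists both so the controller answers
		 * general as well as limited inquiries.
		 */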
1265 		if (cp->val == 0x02) {
1266 			/* Limited discoverable mode */
1267 			hci_cp.num_iac = 2;
1268 			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
1269 			hci_cp.iac_lap[1] = 0x8b;
1270 			hci_cp.iac_lap[2] = 0x9e;
1271 			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
1272 			hci_cp.iac_lap[4] = 0x8b;
1273 			hci_cp.iac_lap[5] = 0x9e;
1274 		} else {
1275 			/* General discoverable mode */
1276 			hci_cp.num_iac = 1;
1277 			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
1278 			hci_cp.iac_lap[1] = 0x8b;
1279 			hci_cp.iac_lap[2] = 0x9e;
1280 		}
1281 
1282 		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1283 			    (hci_cp.num_iac * 3) + 1, &hci_cp);
1284 
1285 		scan |= SCAN_INQUIRY;
1286 	} else {
1287 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1288 	}
1289 
1290 	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1291 
1292 update_ad:
1293 	update_adv_data(&req);
1294 
1295 	err = hci_req_run(&req, set_discoverable_complete);
1296 	if (err < 0)
1297 		mgmt_pending_remove(cmd);
1298 
1299 failed:
1300 	hci_dev_unlock(hdev);
1301 	return err;
1302 }
1303 
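/* Fast connectable toggles the BR/EDR page scan parameters. The values
 * below are in baseband slots of 0.625 ms: an interval of 0x0100 is
 * 256 * 0.625 ms = 160 ms with interlaced scanning, the default is
 * 0x0800 = 1.28 s with standard scanning, and the window 0x0012 equals
 * 11.25 ms. Commands are only added when the stored parameters differ.
 */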
1304 static void write_fast_connectable(struct hci_request *req, bool enable)
1305 {
1306 	struct hci_dev *hdev = req->hdev;
1307 	struct hci_cp_write_page_scan_activity acp;
1308 	u8 type;
1309 
1310 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1311 		return;
1312 
1313 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1314 		return;
1315 
1316 	if (enable) {
1317 		type = PAGE_SCAN_TYPE_INTERLACED;
1318 
1319 		/* 160 msec page scan interval */
1320 		acp.interval = __constant_cpu_to_le16(0x0100);
1321 	} else {
1322 		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
1323 
1324 		/* default 1.28 sec page scan */
1325 		acp.interval = __constant_cpu_to_le16(0x0800);
1326 	}
1327 
1328 	acp.window = __constant_cpu_to_le16(0x0012);
1329 
1330 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1331 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
1332 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1333 			    sizeof(acp), &acp);
1334 
1335 	if (hdev->page_scan_type != type)
1336 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1337 }
1338 
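/* LE advertising follows the connectable setting: ADV_IND when the
 * device is (or is about to become) connectable and ADV_NONCONN_IND
 * otherwise. enable_advertising() uses a fixed min/max interval of
 * 0x0800 (2048 * 0.625 ms = 1.28 s) and advertises on all three
 * advertising channels (channel_map 0x07).
 */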
1339 static u8 get_adv_type(struct hci_dev *hdev)
1340 {
1341 	struct pending_cmd *cmd;
1342 	bool connectable;
1343 
1344 	/* If there's a pending mgmt command the flag will not yet have
1345 	 * its final value, so check for this first.
1346 	 */
1347 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1348 	if (cmd) {
1349 		struct mgmt_mode *cp = cmd->param;
1350 		connectable = !!cp->val;
1351 	} else {
1352 		connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1353 	}
1354 
1355 	return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1356 }
1357 
1358 static void enable_advertising(struct hci_request *req)
1359 {
1360 	struct hci_dev *hdev = req->hdev;
1361 	struct hci_cp_le_set_adv_param cp;
1362 	u8 enable = 0x01;
1363 
1364 	memset(&cp, 0, sizeof(cp));
1365 	cp.min_interval = __constant_cpu_to_le16(0x0800);
1366 	cp.max_interval = __constant_cpu_to_le16(0x0800);
1367 	cp.type = get_adv_type(hdev);
1368 	cp.own_address_type = hdev->own_addr_type;
1369 	cp.channel_map = 0x07;
1370 
1371 	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1372 
1373 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1374 }
1375 
1376 static void disable_advertising(struct hci_request *req)
1377 {
1378 	u8 enable = 0x00;
1379 
1380 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1381 }
1382 
1383 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1384 {
1385 	struct pending_cmd *cmd;
1386 	struct mgmt_mode *cp;
1387 	bool changed;
1388 
1389 	BT_DBG("status 0x%02x", status);
1390 
1391 	hci_dev_lock(hdev);
1392 
1393 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1394 	if (!cmd)
1395 		goto unlock;
1396 
1397 	if (status) {
1398 		u8 mgmt_err = mgmt_status(status);
1399 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1400 		goto remove_cmd;
1401 	}
1402 
1403 	cp = cmd->param;
1404 	if (cp->val)
1405 		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1406 	else
1407 		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1408 
1409 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1410 
1411 	if (changed)
1412 		new_settings(hdev, cmd->sk);
1413 
1414 remove_cmd:
1415 	mgmt_pending_remove(cmd);
1416 
1417 unlock:
1418 	hci_dev_unlock(hdev);
1419 }
1420 
1421 static int set_connectable_update_settings(struct hci_dev *hdev,
1422 					   struct sock *sk, u8 val)
1423 {
1424 	bool changed = false;
1425 	int err;
1426 
1427 	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1428 		changed = true;
1429 
1430 	if (val) {
1431 		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1432 	} else {
1433 		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1434 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1435 	}
1436 
1437 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1438 	if (err < 0)
1439 		return err;
1440 
1441 	if (changed)
1442 		return new_settings(hdev, sk);
1443 
1444 	return 0;
1445 }
1446 
1447 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1448 			   u16 len)
1449 {
1450 	struct mgmt_mode *cp = data;
1451 	struct pending_cmd *cmd;
1452 	struct hci_request req;
1453 	u8 scan;
1454 	int err;
1455 
1456 	BT_DBG("request for %s", hdev->name);
1457 
1458 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1459 	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1460 		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1461 				  MGMT_STATUS_REJECTED);
1462 
1463 	if (cp->val != 0x00 && cp->val != 0x01)
1464 		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1465 				  MGMT_STATUS_INVALID_PARAMS);
1466 
1467 	hci_dev_lock(hdev);
1468 
1469 	if (!hdev_is_powered(hdev)) {
1470 		err = set_connectable_update_settings(hdev, sk, cp->val);
1471 		goto failed;
1472 	}
1473 
1474 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1475 	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1476 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1477 				 MGMT_STATUS_BUSY);
1478 		goto failed;
1479 	}
1480 
1481 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1482 	if (!cmd) {
1483 		err = -ENOMEM;
1484 		goto failed;
1485 	}
1486 
1487 	hci_req_init(&req, hdev);
1488 
1489 	/* If BR/EDR is not enabled and we disable advertising as a
1490 	 * by-product of disabling connectable, we need to update the
1491 	 * advertising flags.
1492 	 */
1493 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1494 		if (!cp->val) {
1495 			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1496 			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1497 		}
1498 		update_adv_data(&req);
1499 	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1500 		if (cp->val) {
1501 			scan = SCAN_PAGE;
1502 		} else {
1503 			scan = 0;
1504 
1505 			if (test_bit(HCI_ISCAN, &hdev->flags) &&
1506 			    hdev->discov_timeout > 0)
1507 				cancel_delayed_work(&hdev->discov_off);
1508 		}
1509 
1510 		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1511 	}
1512 
1513 	/* If we're going from non-connectable to connectable or
1514 	 * vice-versa when fast connectable is enabled, ensure that fast
1515 	 * connectable gets disabled. write_fast_connectable won't do
1516 	 * anything if the page scan parameters are already what they
1517 	 * should be.
1518 	 */
1519 	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1520 		write_fast_connectable(&req, false);
1521 
1522 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1523 	    hci_conn_num(hdev, LE_LINK) == 0) {
1524 		disable_advertising(&req);
1525 		enable_advertising(&req);
1526 	}
1527 
1528 	err = hci_req_run(&req, set_connectable_complete);
1529 	if (err < 0) {
1530 		mgmt_pending_remove(cmd);
1531 		if (err == -ENODATA)
1532 			err = set_connectable_update_settings(hdev, sk,
1533 							      cp->val);
1534 		goto failed;
1535 	}
1536 
1537 failed:
1538 	hci_dev_unlock(hdev);
1539 	return err;
1540 }
1541 
1542 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1543 			u16 len)
1544 {
1545 	struct mgmt_mode *cp = data;
1546 	bool changed;
1547 	int err;
1548 
1549 	BT_DBG("request for %s", hdev->name);
1550 
1551 	if (cp->val != 0x00 && cp->val != 0x01)
1552 		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1553 				  MGMT_STATUS_INVALID_PARAMS);
1554 
1555 	hci_dev_lock(hdev);
1556 
1557 	if (cp->val)
1558 		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1559 	else
1560 		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1561 
1562 	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1563 	if (err < 0)
1564 		goto unlock;
1565 
1566 	if (changed)
1567 		err = new_settings(hdev, sk);
1568 
1569 unlock:
1570 	hci_dev_unlock(hdev);
1571 	return err;
1572 }
1573 
1574 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1575 			     u16 len)
1576 {
1577 	struct mgmt_mode *cp = data;
1578 	struct pending_cmd *cmd;
1579 	u8 val, status;
1580 	int err;
1581 
1582 	BT_DBG("request for %s", hdev->name);
1583 
1584 	status = mgmt_bredr_support(hdev);
1585 	if (status)
1586 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1587 				  status);
1588 
1589 	if (cp->val != 0x00 && cp->val != 0x01)
1590 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1591 				  MGMT_STATUS_INVALID_PARAMS);
1592 
1593 	hci_dev_lock(hdev);
1594 
1595 	if (!hdev_is_powered(hdev)) {
1596 		bool changed = false;
1597 
1598 		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1599 					  &hdev->dev_flags)) {
1600 			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1601 			changed = true;
1602 		}
1603 
1604 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1605 		if (err < 0)
1606 			goto failed;
1607 
1608 		if (changed)
1609 			err = new_settings(hdev, sk);
1610 
1611 		goto failed;
1612 	}
1613 
1614 	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1615 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1616 				 MGMT_STATUS_BUSY);
1617 		goto failed;
1618 	}
1619 
1620 	val = !!cp->val;
1621 
1622 	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1623 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1624 		goto failed;
1625 	}
1626 
1627 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1628 	if (!cmd) {
1629 		err = -ENOMEM;
1630 		goto failed;
1631 	}
1632 
1633 	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1634 	if (err < 0) {
1635 		mgmt_pending_remove(cmd);
1636 		goto failed;
1637 	}
1638 
1639 failed:
1640 	hci_dev_unlock(hdev);
1641 	return err;
1642 }
1643 
1644 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1645 {
1646 	struct mgmt_mode *cp = data;
1647 	struct pending_cmd *cmd;
1648 	u8 status;
1649 	int err;
1650 
1651 	BT_DBG("request for %s", hdev->name);
1652 
1653 	status = mgmt_bredr_support(hdev);
1654 	if (status)
1655 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1656 
1657 	if (!lmp_ssp_capable(hdev))
1658 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1659 				  MGMT_STATUS_NOT_SUPPORTED);
1660 
1661 	if (cp->val != 0x00 && cp->val != 0x01)
1662 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1663 				  MGMT_STATUS_INVALID_PARAMS);
1664 
1665 	hci_dev_lock(hdev);
1666 
1667 	if (!hdev_is_powered(hdev)) {
1668 		bool changed;
1669 
1670 		if (cp->val) {
1671 			changed = !test_and_set_bit(HCI_SSP_ENABLED,
1672 						    &hdev->dev_flags);
1673 		} else {
1674 			changed = test_and_clear_bit(HCI_SSP_ENABLED,
1675 						     &hdev->dev_flags);
1676 			if (!changed)
1677 				changed = test_and_clear_bit(HCI_HS_ENABLED,
1678 							     &hdev->dev_flags);
1679 			else
1680 				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1681 		}
1682 
1683 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1684 		if (err < 0)
1685 			goto failed;
1686 
1687 		if (changed)
1688 			err = new_settings(hdev, sk);
1689 
1690 		goto failed;
1691 	}
1692 
1693 	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1694 	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1695 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1696 				 MGMT_STATUS_BUSY);
1697 		goto failed;
1698 	}
1699 
1700 	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1701 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1702 		goto failed;
1703 	}
1704 
1705 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1706 	if (!cmd) {
1707 		err = -ENOMEM;
1708 		goto failed;
1709 	}
1710 
1711 	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1712 	if (err < 0) {
1713 		mgmt_pending_remove(cmd);
1714 		goto failed;
1715 	}
1716 
1717 failed:
1718 	hci_dev_unlock(hdev);
1719 	return err;
1720 }
1721 
1722 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1723 {
1724 	struct mgmt_mode *cp = data;
1725 	bool changed;
1726 	u8 status;
1727 	int err;
1728 
1729 	BT_DBG("request for %s", hdev->name);
1730 
1731 	status = mgmt_bredr_support(hdev);
1732 	if (status)
1733 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1734 
1735 	if (!lmp_ssp_capable(hdev))
1736 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1737 				  MGMT_STATUS_NOT_SUPPORTED);
1738 
1739 	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1740 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1741 				  MGMT_STATUS_REJECTED);
1742 
1743 	if (cp->val != 0x00 && cp->val != 0x01)
1744 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1745 				  MGMT_STATUS_INVALID_PARAMS);
1746 
1747 	hci_dev_lock(hdev);
1748 
1749 	if (cp->val) {
1750 		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1751 	} else {
1752 		if (hdev_is_powered(hdev)) {
1753 			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1754 					 MGMT_STATUS_REJECTED);
1755 			goto unlock;
1756 		}
1757 
1758 		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1759 	}
1760 
1761 	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1762 	if (err < 0)
1763 		goto unlock;
1764 
1765 	if (changed)
1766 		err = new_settings(hdev, sk);
1767 
1768 unlock:
1769 	hci_dev_unlock(hdev);
1770 	return err;
1771 }
1772 
1773 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1774 {
1775 	struct cmd_lookup match = { NULL, hdev };
1776 
1777 	if (status) {
1778 		u8 mgmt_err = mgmt_status(status);
1779 
1780 		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1781 				     &mgmt_err);
1782 		return;
1783 	}
1784 
1785 	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1786 
1787 	new_settings(hdev, match.sk);
1788 
1789 	if (match.sk)
1790 		sock_put(match.sk);
1791 
1792 	/* Make sure the controller has a good default for
1793 	 * advertising data. Restrict the update to when LE
1794 	 * has actually been enabled. During power on, the
1795 	 * update in powered_update_hci will take care of it.
1796 	 */
1797 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1798 		struct hci_request req;
1799 
1800 		hci_dev_lock(hdev);
1801 
1802 		hci_req_init(&req, hdev);
1803 		update_adv_data(&req);
1804 		update_scan_rsp_data(&req);
1805 		hci_req_run(&req, NULL);
1806 
1807 		hci_dev_unlock(hdev);
1808 	}
1809 }
1810 
1811 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1812 {
1813 	struct mgmt_mode *cp = data;
1814 	struct hci_cp_write_le_host_supported hci_cp;
1815 	struct pending_cmd *cmd;
1816 	struct hci_request req;
1817 	int err;
1818 	u8 val, enabled;
1819 
1820 	BT_DBG("request for %s", hdev->name);
1821 
1822 	if (!lmp_le_capable(hdev))
1823 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1824 				  MGMT_STATUS_NOT_SUPPORTED);
1825 
1826 	if (cp->val != 0x00 && cp->val != 0x01)
1827 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1828 				  MGMT_STATUS_INVALID_PARAMS);
1829 
1830 	/* LE-only devices do not allow toggling LE on/off */
1831 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1832 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1833 				  MGMT_STATUS_REJECTED);
1834 
1835 	hci_dev_lock(hdev);
1836 
1837 	val = !!cp->val;
1838 	enabled = lmp_host_le_capable(hdev);
1839 
1840 	if (!hdev_is_powered(hdev) || val == enabled) {
1841 		bool changed = false;
1842 
1843 		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1844 			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1845 			changed = true;
1846 		}
1847 
1848 		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
1849 			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1850 			changed = true;
1851 		}
1852 
1853 		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1854 		if (err < 0)
1855 			goto unlock;
1856 
1857 		if (changed)
1858 			err = new_settings(hdev, sk);
1859 
1860 		goto unlock;
1861 	}
1862 
1863 	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1864 	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1865 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1866 				 MGMT_STATUS_BUSY);
1867 		goto unlock;
1868 	}
1869 
1870 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1871 	if (!cmd) {
1872 		err = -ENOMEM;
1873 		goto unlock;
1874 	}
1875 
1876 	hci_req_init(&req, hdev);
1877 
1878 	memset(&hci_cp, 0, sizeof(hci_cp));
1879 
1880 	if (val) {
1881 		hci_cp.le = val;
1882 		hci_cp.simul = lmp_le_br_capable(hdev);
1883 	} else {
1884 		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1885 			disable_advertising(&req);
1886 	}
1887 
1888 	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1889 		    &hci_cp);
1890 
1891 	err = hci_req_run(&req, le_enable_complete);
1892 	if (err < 0)
1893 		mgmt_pending_remove(cmd);
1894 
1895 unlock:
1896 	hci_dev_unlock(hdev);
1897 	return err;
1898 }
1899 
1900 /* This is a helper function to test for pending mgmt commands that can
1901  * trigger CoD or EIR HCI commands. We only allow one such pending mgmt
1902  * command at a time, since otherwise we cannot easily track what the
1903  * current values are and will be, and, based on that, decide whether a
1904  * new HCI command needs to be sent and with what value.
1905  */
1906 static bool pending_eir_or_class(struct hci_dev *hdev)
1907 {
1908 	struct pending_cmd *cmd;
1909 
1910 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1911 		switch (cmd->opcode) {
1912 		case MGMT_OP_ADD_UUID:
1913 		case MGMT_OP_REMOVE_UUID:
1914 		case MGMT_OP_SET_DEV_CLASS:
1915 		case MGMT_OP_SET_POWERED:
1916 			return true;
1917 		}
1918 	}
1919 
1920 	return false;
1921 }
1922 
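/* bluetooth_base_uuid is the Bluetooth Base UUID
 * 00000000-0000-1000-8000-00805F9B34FB in little-endian byte order.
 * get_uuid_size() reports 16 or 32 bits only for UUIDs that match the
 * base in their low 12 bytes, e.g. the 16-bit UUID 0x1200 is shorthand
 * for 00001200-0000-1000-8000-00805F9B34FB.
 */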
1923 static const u8 bluetooth_base_uuid[] = {
1924 			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1925 			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1926 };
1927 
1928 static u8 get_uuid_size(const u8 *uuid)
1929 {
1930 	u32 val;
1931 
1932 	if (memcmp(uuid, bluetooth_base_uuid, 12))
1933 		return 128;
1934 
1935 	val = get_unaligned_le32(&uuid[12]);
1936 	if (val > 0xffff)
1937 		return 32;
1938 
1939 	return 16;
1940 }
1941 
1942 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1943 {
1944 	struct pending_cmd *cmd;
1945 
1946 	hci_dev_lock(hdev);
1947 
1948 	cmd = mgmt_pending_find(mgmt_op, hdev);
1949 	if (!cmd)
1950 		goto unlock;
1951 
1952 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1953 		     hdev->dev_class, 3);
1954 
1955 	mgmt_pending_remove(cmd);
1956 
1957 unlock:
1958 	hci_dev_unlock(hdev);
1959 }
1960 
1961 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1962 {
1963 	BT_DBG("status 0x%02x", status);
1964 
1965 	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1966 }
1967 
1968 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1969 {
1970 	struct mgmt_cp_add_uuid *cp = data;
1971 	struct pending_cmd *cmd;
1972 	struct hci_request req;
1973 	struct bt_uuid *uuid;
1974 	int err;
1975 
1976 	BT_DBG("request for %s", hdev->name);
1977 
1978 	hci_dev_lock(hdev);
1979 
1980 	if (pending_eir_or_class(hdev)) {
1981 		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1982 				 MGMT_STATUS_BUSY);
1983 		goto failed;
1984 	}
1985 
1986 	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1987 	if (!uuid) {
1988 		err = -ENOMEM;
1989 		goto failed;
1990 	}
1991 
1992 	memcpy(uuid->uuid, cp->uuid, 16);
1993 	uuid->svc_hint = cp->svc_hint;
1994 	uuid->size = get_uuid_size(cp->uuid);
1995 
1996 	list_add_tail(&uuid->list, &hdev->uuids);
1997 
1998 	hci_req_init(&req, hdev);
1999 
2000 	update_class(&req);
2001 	update_eir(&req);
2002 
2003 	err = hci_req_run(&req, add_uuid_complete);
2004 	if (err < 0) {
2005 		if (err != -ENODATA)
2006 			goto failed;
2007 
2008 		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2009 				   hdev->dev_class, 3);
2010 		goto failed;
2011 	}
2012 
2013 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2014 	if (!cmd) {
2015 		err = -ENOMEM;
2016 		goto failed;
2017 	}
2018 
2019 	err = 0;
2020 
2021 failed:
2022 	hci_dev_unlock(hdev);
2023 	return err;
2024 }
2025 
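/* While powered, clearing all UUIDs arms the service cache: the actual
 * class of device and EIR updates are deferred by CACHE_TIMEOUT so
 * that a burst of UUID changes results in a single HCI update, pushed
 * later from service_cache_off().
 */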
2026 static bool enable_service_cache(struct hci_dev *hdev)
2027 {
2028 	if (!hdev_is_powered(hdev))
2029 		return false;
2030 
2031 	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2032 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2033 				   CACHE_TIMEOUT);
2034 		return true;
2035 	}
2036 
2037 	return false;
2038 }
2039 
2040 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2041 {
2042 	BT_DBG("status 0x%02x", status);
2043 
2044 	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2045 }
2046 
2047 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2048 		       u16 len)
2049 {
2050 	struct mgmt_cp_remove_uuid *cp = data;
2051 	struct pending_cmd *cmd;
2052 	struct bt_uuid *match, *tmp;
2053 	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2054 	struct hci_request req;
2055 	int err, found;
2056 
2057 	BT_DBG("request for %s", hdev->name);
2058 
2059 	hci_dev_lock(hdev);
2060 
2061 	if (pending_eir_or_class(hdev)) {
2062 		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2063 				 MGMT_STATUS_BUSY);
2064 		goto unlock;
2065 	}
2066 
2067 	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2068 		err = hci_uuids_clear(hdev);
2069 
2070 		if (enable_service_cache(hdev)) {
2071 			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2072 					   0, hdev->dev_class, 3);
2073 			goto unlock;
2074 		}
2075 
2076 		goto update_class;
2077 	}
2078 
2079 	found = 0;
2080 
2081 	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2082 		if (memcmp(match->uuid, cp->uuid, 16) != 0)
2083 			continue;
2084 
2085 		list_del(&match->list);
2086 		kfree(match);
2087 		found++;
2088 	}
2089 
2090 	if (found == 0) {
2091 		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2092 				 MGMT_STATUS_INVALID_PARAMS);
2093 		goto unlock;
2094 	}
2095 
2096 update_class:
2097 	hci_req_init(&req, hdev);
2098 
2099 	update_class(&req);
2100 	update_eir(&req);
2101 
2102 	err = hci_req_run(&req, remove_uuid_complete);
2103 	if (err < 0) {
2104 		if (err != -ENODATA)
2105 			goto unlock;
2106 
2107 		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2108 				   hdev->dev_class, 3);
2109 		goto unlock;
2110 	}
2111 
2112 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2113 	if (!cmd) {
2114 		err = -ENOMEM;
2115 		goto unlock;
2116 	}
2117 
2118 	err = 0;
2119 
2120 unlock:
2121 	hci_dev_unlock(hdev);
2122 	return err;
2123 }
2124 
2125 static void set_class_complete(struct hci_dev *hdev, u8 status)
2126 {
2127 	BT_DBG("status 0x%02x", status);
2128 
2129 	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2130 }
2131 
2132 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2133 			 u16 len)
2134 {
2135 	struct mgmt_cp_set_dev_class *cp = data;
2136 	struct pending_cmd *cmd;
2137 	struct hci_request req;
2138 	int err;
2139 
2140 	BT_DBG("request for %s", hdev->name);
2141 
2142 	if (!lmp_bredr_capable(hdev))
2143 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2144 				  MGMT_STATUS_NOT_SUPPORTED);
2145 
2146 	hci_dev_lock(hdev);
2147 
2148 	if (pending_eir_or_class(hdev)) {
2149 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2150 				 MGMT_STATUS_BUSY);
2151 		goto unlock;
2152 	}
2153 
2154 	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2155 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2156 				 MGMT_STATUS_INVALID_PARAMS);
2157 		goto unlock;
2158 	}
2159 
2160 	hdev->major_class = cp->major;
2161 	hdev->minor_class = cp->minor;
2162 
2163 	if (!hdev_is_powered(hdev)) {
2164 		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2165 				   hdev->dev_class, 3);
2166 		goto unlock;
2167 	}
2168 
2169 	hci_req_init(&req, hdev);
2170 
2171 	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2172 		hci_dev_unlock(hdev);
2173 		cancel_delayed_work_sync(&hdev->service_cache);
2174 		hci_dev_lock(hdev);
2175 		update_eir(&req);
2176 	}
2177 
2178 	update_class(&req);
2179 
2180 	err = hci_req_run(&req, set_class_complete);
2181 	if (err < 0) {
2182 		if (err != -ENODATA)
2183 			goto unlock;
2184 
2185 		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2186 				   hdev->dev_class, 3);
2187 		goto unlock;
2188 	}
2189 
2190 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2191 	if (!cmd) {
2192 		err = -ENOMEM;
2193 		goto unlock;
2194 	}
2195 
2196 	err = 0;
2197 
2198 unlock:
2199 	hci_dev_unlock(hdev);
2200 	return err;
2201 }
2202 
2203 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2204 			  u16 len)
2205 {
2206 	struct mgmt_cp_load_link_keys *cp = data;
2207 	u16 key_count, expected_len;
2208 	int i;
2209 
2210 	BT_DBG("request for %s", hdev->name);
2211 
2212 	if (!lmp_bredr_capable(hdev))
2213 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2214 				  MGMT_STATUS_NOT_SUPPORTED);
2215 
2216 	key_count = __le16_to_cpu(cp->key_count);
2217 
2218 	expected_len = sizeof(*cp) + key_count *
2219 					sizeof(struct mgmt_link_key_info);
2220 	if (expected_len != len) {
2221 		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2222 		       expected_len, len);
2223 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2224 				  MGMT_STATUS_INVALID_PARAMS);
2225 	}
2226 
2227 	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2228 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2229 				  MGMT_STATUS_INVALID_PARAMS);
2230 
2231 	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2232 	       key_count);
2233 
2234 	for (i = 0; i < key_count; i++) {
2235 		struct mgmt_link_key_info *key = &cp->keys[i];
2236 
2237 		if (key->addr.type != BDADDR_BREDR)
2238 			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2239 					  MGMT_STATUS_INVALID_PARAMS);
2240 	}
2241 
2242 	hci_dev_lock(hdev);
2243 
2244 	hci_link_keys_clear(hdev);
2245 
2246 	if (cp->debug_keys)
2247 		set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2248 	else
2249 		clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2250 
2251 	for (i = 0; i < key_count; i++) {
2252 		struct mgmt_link_key_info *key = &cp->keys[i];
2253 
2254 		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2255 				 key->type, key->pin_len);
2256 	}
2257 
2258 	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2259 
2260 	hci_dev_unlock(hdev);
2261 
2262 	return 0;
2263 }
2264 
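/* Send a Device Unpaired event to all mgmt sockets except skip_sk. */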
2265 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2266 			   u8 addr_type, struct sock *skip_sk)
2267 {
2268 	struct mgmt_ev_device_unpaired ev;
2269 
2270 	bacpy(&ev.addr.bdaddr, bdaddr);
2271 	ev.addr.type = addr_type;
2272 
2273 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2274 			  skip_sk);
2275 }
2276 
2277 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2278 			 u16 len)
2279 {
2280 	struct mgmt_cp_unpair_device *cp = data;
2281 	struct mgmt_rp_unpair_device rp;
2282 	struct hci_cp_disconnect dc;
2283 	struct pending_cmd *cmd;
2284 	struct hci_conn *conn;
2285 	int err;
2286 
2287 	memset(&rp, 0, sizeof(rp));
2288 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2289 	rp.addr.type = cp->addr.type;
2290 
2291 	if (!bdaddr_type_is_valid(cp->addr.type))
2292 		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2293 				    MGMT_STATUS_INVALID_PARAMS,
2294 				    &rp, sizeof(rp));
2295 
2296 	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2297 		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2298 				    MGMT_STATUS_INVALID_PARAMS,
2299 				    &rp, sizeof(rp));
2300 
2301 	hci_dev_lock(hdev);
2302 
2303 	if (!hdev_is_powered(hdev)) {
2304 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2305 				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2306 		goto unlock;
2307 	}
2308 
2309 	if (cp->addr.type == BDADDR_BREDR)
2310 		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2311 	else
2312 		err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
2313 
2314 	if (err < 0) {
2315 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2316 				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2317 		goto unlock;
2318 	}
2319 
2320 	if (cp->disconnect) {
2321 		if (cp->addr.type == BDADDR_BREDR)
2322 			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2323 						       &cp->addr.bdaddr);
2324 		else
2325 			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2326 						       &cp->addr.bdaddr);
2327 	} else {
2328 		conn = NULL;
2329 	}
2330 
2331 	if (!conn) {
2332 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2333 				   &rp, sizeof(rp));
2334 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2335 		goto unlock;
2336 	}
2337 
2338 	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2339 			       sizeof(*cp));
2340 	if (!cmd) {
2341 		err = -ENOMEM;
2342 		goto unlock;
2343 	}
2344 
2345 	dc.handle = cpu_to_le16(conn->handle);
2346 	dc.reason = 0x13; /* Remote User Terminated Connection */
2347 	dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2348 	if (err < 0)
2349 		mgmt_pending_remove(cmd);
2350 
2351 unlock:
2352 	hci_dev_unlock(hdev);
2353 	return err;
2354 }
2355 
2356 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2357 		      u16 len)
2358 {
2359 	struct mgmt_cp_disconnect *cp = data;
2360 	struct mgmt_rp_disconnect rp;
2361 	struct hci_cp_disconnect dc;
2362 	struct pending_cmd *cmd;
2363 	struct hci_conn *conn;
2364 	int err;
2365 
2366 	BT_DBG("");
2367 
2368 	memset(&rp, 0, sizeof(rp));
2369 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2370 	rp.addr.type = cp->addr.type;
2371 
2372 	if (!bdaddr_type_is_valid(cp->addr.type))
2373 		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2374 				    MGMT_STATUS_INVALID_PARAMS,
2375 				    &rp, sizeof(rp));
2376 
2377 	hci_dev_lock(hdev);
2378 
2379 	if (!test_bit(HCI_UP, &hdev->flags)) {
2380 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2381 				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2382 		goto failed;
2383 	}
2384 
2385 	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2386 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2387 				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
2388 		goto failed;
2389 	}
2390 
2391 	if (cp->addr.type == BDADDR_BREDR)
2392 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2393 					       &cp->addr.bdaddr);
2394 	else
2395 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2396 
2397 	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2398 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2399 				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2400 		goto failed;
2401 	}
2402 
2403 	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2404 	if (!cmd) {
2405 		err = -ENOMEM;
2406 		goto failed;
2407 	}
2408 
2409 	dc.handle = cpu_to_le16(conn->handle);
2410 	dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2411 
2412 	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2413 	if (err < 0)
2414 		mgmt_pending_remove(cmd);
2415 
2416 failed:
2417 	hci_dev_unlock(hdev);
2418 	return err;
2419 }
2420 
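/* Translate an HCI link type and LE address type into the corresponding
 * mgmt BDADDR_* address type.
 */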
2421 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2422 {
2423 	switch (link_type) {
2424 	case LE_LINK:
2425 		switch (addr_type) {
2426 		case ADDR_LE_DEV_PUBLIC:
2427 			return BDADDR_LE_PUBLIC;
2428 
2429 		default:
2430 			/* Fall back to LE Random address type */
2431 			return BDADDR_LE_RANDOM;
2432 		}
2433 
2434 	default:
2435 		/* Fall back to BR/EDR type */
2436 		return BDADDR_BREDR;
2437 	}
2438 }
2439 
2440 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2441 			   u16 data_len)
2442 {
2443 	struct mgmt_rp_get_connections *rp;
2444 	struct hci_conn *c;
2445 	size_t rp_len;
2446 	int err;
2447 	u16 i;
2448 
2449 	BT_DBG("");
2450 
2451 	hci_dev_lock(hdev);
2452 
2453 	if (!hdev_is_powered(hdev)) {
2454 		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2455 				 MGMT_STATUS_NOT_POWERED);
2456 		goto unlock;
2457 	}
2458 
2459 	i = 0;
2460 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2461 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2462 			i++;
2463 	}
2464 
2465 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2466 	rp = kmalloc(rp_len, GFP_KERNEL);
2467 	if (!rp) {
2468 		err = -ENOMEM;
2469 		goto unlock;
2470 	}
2471 
2472 	i = 0;
2473 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2474 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2475 			continue;
2476 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2477 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2478 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2479 			continue;
2480 		i++;
2481 	}
2482 
2483 	rp->conn_count = cpu_to_le16(i);
2484 
2485 	/* Recalculate length in case of filtered SCO connections, etc */
2486 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2487 
2488 	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2489 			   rp_len);
2490 
2491 	kfree(rp);
2492 
2493 unlock:
2494 	hci_dev_unlock(hdev);
2495 	return err;
2496 }
2497 
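/* Issue an HCI PIN Code Negative Reply and track it as a pending
 * mgmt command.
 */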
2498 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2499 				   struct mgmt_cp_pin_code_neg_reply *cp)
2500 {
2501 	struct pending_cmd *cmd;
2502 	int err;
2503 
2504 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2505 			       sizeof(*cp));
2506 	if (!cmd)
2507 		return -ENOMEM;
2508 
2509 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2510 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2511 	if (err < 0)
2512 		mgmt_pending_remove(cmd);
2513 
2514 	return err;
2515 }
2516 
2517 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2518 			  u16 len)
2519 {
2520 	struct hci_conn *conn;
2521 	struct mgmt_cp_pin_code_reply *cp = data;
2522 	struct hci_cp_pin_code_reply reply;
2523 	struct pending_cmd *cmd;
2524 	int err;
2525 
2526 	BT_DBG("");
2527 
2528 	hci_dev_lock(hdev);
2529 
2530 	if (!hdev_is_powered(hdev)) {
2531 		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2532 				 MGMT_STATUS_NOT_POWERED);
2533 		goto failed;
2534 	}
2535 
2536 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2537 	if (!conn) {
2538 		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2539 				 MGMT_STATUS_NOT_CONNECTED);
2540 		goto failed;
2541 	}
2542 
2543 	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2544 		struct mgmt_cp_pin_code_neg_reply ncp;
2545 
2546 		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2547 
2548 		BT_ERR("PIN code is not 16 bytes long");
2549 
2550 		err = send_pin_code_neg_reply(sk, hdev, &ncp);
2551 		if (err >= 0)
2552 			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2553 					 MGMT_STATUS_INVALID_PARAMS);
2554 
2555 		goto failed;
2556 	}
2557 
2558 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2559 	if (!cmd) {
2560 		err = -ENOMEM;
2561 		goto failed;
2562 	}
2563 
2564 	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2565 	reply.pin_len = cp->pin_len;
2566 	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2567 
2568 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2569 	if (err < 0)
2570 		mgmt_pending_remove(cmd);
2571 
2572 failed:
2573 	hci_dev_unlock(hdev);
2574 	return err;
2575 }
2576 
2577 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2578 			     u16 len)
2579 {
2580 	struct mgmt_cp_set_io_capability *cp = data;
2581 
2582 	BT_DBG("");
2583 
2584 	hci_dev_lock(hdev);
2585 
2586 	hdev->io_capability = cp->io_capability;
2587 
2588 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2589 	       hdev->io_capability);
2590 
2591 	hci_dev_unlock(hdev);
2592 
2593 	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2594 			    0);
2595 }
2596 
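/* Find the pending Pair Device command, if any, whose user_data refers to
 * the given connection.
 */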
2597 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2598 {
2599 	struct hci_dev *hdev = conn->hdev;
2600 	struct pending_cmd *cmd;
2601 
2602 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2603 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2604 			continue;
2605 
2606 		if (cmd->user_data != conn)
2607 			continue;
2608 
2609 		return cmd;
2610 	}
2611 
2612 	return NULL;
2613 }
2614 
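/* Send the final Pair Device response, detach the pairing callbacks from
 * the connection and drop the connection reference held while pairing.
 */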
2615 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2616 {
2617 	struct mgmt_rp_pair_device rp;
2618 	struct hci_conn *conn = cmd->user_data;
2619 
2620 	bacpy(&rp.addr.bdaddr, &conn->dst);
2621 	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2622 
2623 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2624 		     &rp, sizeof(rp));
2625 
2626 	/* So we don't get further callbacks for this connection */
2627 	conn->connect_cfm_cb = NULL;
2628 	conn->security_cfm_cb = NULL;
2629 	conn->disconn_cfm_cb = NULL;
2630 
2631 	hci_conn_drop(conn);
2632 
2633 	mgmt_pending_remove(cmd);
2634 }
2635 
2636 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2637 {
2638 	struct pending_cmd *cmd;
2639 
2640 	BT_DBG("status %u", status);
2641 
2642 	cmd = find_pairing(conn);
2643 	if (!cmd)
2644 		BT_DBG("Unable to find a pending command");
2645 	else
2646 		pairing_complete(cmd, mgmt_status(status));
2647 }
2648 
2649 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2650 {
2651 	struct pending_cmd *cmd;
2652 
2653 	BT_DBG("status %u", status);
2654 
2655 	if (!status)
2656 		return;
2657 
2658 	cmd = find_pairing(conn);
2659 	if (!cmd)
2660 		BT_DBG("Unable to find a pending command");
2661 	else
2662 		pairing_complete(cmd, mgmt_status(status));
2663 }
2664 
2665 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2666 		       u16 len)
2667 {
2668 	struct mgmt_cp_pair_device *cp = data;
2669 	struct mgmt_rp_pair_device rp;
2670 	struct pending_cmd *cmd;
2671 	u8 sec_level, auth_type;
2672 	struct hci_conn *conn;
2673 	int err;
2674 
2675 	BT_DBG("");
2676 
2677 	memset(&rp, 0, sizeof(rp));
2678 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2679 	rp.addr.type = cp->addr.type;
2680 
2681 	if (!bdaddr_type_is_valid(cp->addr.type))
2682 		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2683 				    MGMT_STATUS_INVALID_PARAMS,
2684 				    &rp, sizeof(rp));
2685 
2686 	hci_dev_lock(hdev);
2687 
2688 	if (!hdev_is_powered(hdev)) {
2689 		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2690 				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2691 		goto unlock;
2692 	}
2693 
2694 	sec_level = BT_SECURITY_MEDIUM;
2695 	if (cp->io_cap == 0x03)
2696 		auth_type = HCI_AT_DEDICATED_BONDING;
2697 	else
2698 		auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2699 
2700 	if (cp->addr.type == BDADDR_BREDR)
2701 		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2702 				   cp->addr.type, sec_level, auth_type);
2703 	else
2704 		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2705 				   cp->addr.type, sec_level, auth_type);
2706 
2707 	if (IS_ERR(conn)) {
2708 		int status;
2709 
2710 		if (PTR_ERR(conn) == -EBUSY)
2711 			status = MGMT_STATUS_BUSY;
2712 		else
2713 			status = MGMT_STATUS_CONNECT_FAILED;
2714 
2715 		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2716 				   status, &rp,
2717 				   sizeof(rp));
2718 		goto unlock;
2719 	}
2720 
2721 	if (conn->connect_cfm_cb) {
2722 		hci_conn_drop(conn);
2723 		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2724 				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
2725 		goto unlock;
2726 	}
2727 
2728 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2729 	if (!cmd) {
2730 		err = -ENOMEM;
2731 		hci_conn_drop(conn);
2732 		goto unlock;
2733 	}
2734 
2735 	/* For LE, just connecting isn't a proof that the pairing finished */
2736 	if (cp->addr.type == BDADDR_BREDR)
2737 		conn->connect_cfm_cb = pairing_complete_cb;
2738 	else
2739 		conn->connect_cfm_cb = le_connect_complete_cb;
2740 
2741 	conn->security_cfm_cb = pairing_complete_cb;
2742 	conn->disconn_cfm_cb = pairing_complete_cb;
2743 	conn->io_capability = cp->io_cap;
2744 	cmd->user_data = conn;
2745 
2746 	if (conn->state == BT_CONNECTED &&
2747 	    hci_conn_security(conn, sec_level, auth_type))
2748 		pairing_complete(cmd, 0);
2749 
2750 	err = 0;
2751 
2752 unlock:
2753 	hci_dev_unlock(hdev);
2754 	return err;
2755 }
2756 
2757 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2758 			      u16 len)
2759 {
2760 	struct mgmt_addr_info *addr = data;
2761 	struct pending_cmd *cmd;
2762 	struct hci_conn *conn;
2763 	int err;
2764 
2765 	BT_DBG("");
2766 
2767 	hci_dev_lock(hdev);
2768 
2769 	if (!hdev_is_powered(hdev)) {
2770 		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2771 				 MGMT_STATUS_NOT_POWERED);
2772 		goto unlock;
2773 	}
2774 
2775 	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2776 	if (!cmd) {
2777 		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2778 				 MGMT_STATUS_INVALID_PARAMS);
2779 		goto unlock;
2780 	}
2781 
2782 	conn = cmd->user_data;
2783 
2784 	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2785 		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2786 				 MGMT_STATUS_INVALID_PARAMS);
2787 		goto unlock;
2788 	}
2789 
2790 	pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2791 
2792 	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2793 			   addr, sizeof(*addr));
2794 unlock:
2795 	hci_dev_unlock(hdev);
2796 	return err;
2797 }
2798 
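/* Common handler for the user confirmation and passkey (negative) replies:
 * LE pairing responses are handled via SMP, while BR/EDR responses are sent
 * as the matching HCI command and tracked as a pending mgmt command.
 */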
2799 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2800 			     struct mgmt_addr_info *addr, u16 mgmt_op,
2801 			     u16 hci_op, __le32 passkey)
2802 {
2803 	struct pending_cmd *cmd;
2804 	struct hci_conn *conn;
2805 	int err;
2806 
2807 	hci_dev_lock(hdev);
2808 
2809 	if (!hdev_is_powered(hdev)) {
2810 		err = cmd_complete(sk, hdev->id, mgmt_op,
2811 				   MGMT_STATUS_NOT_POWERED, addr,
2812 				   sizeof(*addr));
2813 		goto done;
2814 	}
2815 
2816 	if (addr->type == BDADDR_BREDR)
2817 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2818 	else
2819 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2820 
2821 	if (!conn) {
2822 		err = cmd_complete(sk, hdev->id, mgmt_op,
2823 				   MGMT_STATUS_NOT_CONNECTED, addr,
2824 				   sizeof(*addr));
2825 		goto done;
2826 	}
2827 
2828 	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2829 		/* Continue with pairing via SMP */
2830 		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2831 
2832 		if (!err)
2833 			err = cmd_complete(sk, hdev->id, mgmt_op,
2834 					   MGMT_STATUS_SUCCESS, addr,
2835 					   sizeof(*addr));
2836 		else
2837 			err = cmd_complete(sk, hdev->id, mgmt_op,
2838 					   MGMT_STATUS_FAILED, addr,
2839 					   sizeof(*addr));
2840 
2841 		goto done;
2842 	}
2843 
2844 	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2845 	if (!cmd) {
2846 		err = -ENOMEM;
2847 		goto done;
2848 	}
2849 
2850 	/* Continue with pairing via HCI */
2851 	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2852 		struct hci_cp_user_passkey_reply cp;
2853 
2854 		bacpy(&cp.bdaddr, &addr->bdaddr);
2855 		cp.passkey = passkey;
2856 		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2857 	} else
2858 		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2859 				   &addr->bdaddr);
2860 
2861 	if (err < 0)
2862 		mgmt_pending_remove(cmd);
2863 
2864 done:
2865 	hci_dev_unlock(hdev);
2866 	return err;
2867 }
2868 
2869 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2870 			      void *data, u16 len)
2871 {
2872 	struct mgmt_cp_pin_code_neg_reply *cp = data;
2873 
2874 	BT_DBG("");
2875 
2876 	return user_pairing_resp(sk, hdev, &cp->addr,
2877 				MGMT_OP_PIN_CODE_NEG_REPLY,
2878 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
2879 }
2880 
2881 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2882 			      u16 len)
2883 {
2884 	struct mgmt_cp_user_confirm_reply *cp = data;
2885 
2886 	BT_DBG("");
2887 
2888 	if (len != sizeof(*cp))
2889 		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2890 				  MGMT_STATUS_INVALID_PARAMS);
2891 
2892 	return user_pairing_resp(sk, hdev, &cp->addr,
2893 				 MGMT_OP_USER_CONFIRM_REPLY,
2894 				 HCI_OP_USER_CONFIRM_REPLY, 0);
2895 }
2896 
2897 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2898 				  void *data, u16 len)
2899 {
2900 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
2901 
2902 	BT_DBG("");
2903 
2904 	return user_pairing_resp(sk, hdev, &cp->addr,
2905 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2906 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2907 }
2908 
2909 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2910 			      u16 len)
2911 {
2912 	struct mgmt_cp_user_passkey_reply *cp = data;
2913 
2914 	BT_DBG("");
2915 
2916 	return user_pairing_resp(sk, hdev, &cp->addr,
2917 				 MGMT_OP_USER_PASSKEY_REPLY,
2918 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2919 }
2920 
2921 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2922 				  void *data, u16 len)
2923 {
2924 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
2925 
2926 	BT_DBG("");
2927 
2928 	return user_pairing_resp(sk, hdev, &cp->addr,
2929 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2930 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2931 }
2932 
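/* Queue a Write Local Name command carrying the currently stored name. */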
2933 static void update_name(struct hci_request *req)
2934 {
2935 	struct hci_dev *hdev = req->hdev;
2936 	struct hci_cp_write_local_name cp;
2937 
2938 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2939 
2940 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2941 }
2942 
2943 static void set_name_complete(struct hci_dev *hdev, u8 status)
2944 {
2945 	struct mgmt_cp_set_local_name *cp;
2946 	struct pending_cmd *cmd;
2947 
2948 	BT_DBG("status 0x%02x", status);
2949 
2950 	hci_dev_lock(hdev);
2951 
2952 	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2953 	if (!cmd)
2954 		goto unlock;
2955 
2956 	cp = cmd->param;
2957 
2958 	if (status)
2959 		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2960 			   mgmt_status(status));
2961 	else
2962 		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2963 			     cp, sizeof(*cp));
2964 
2965 	mgmt_pending_remove(cmd);
2966 
2967 unlock:
2968 	hci_dev_unlock(hdev);
2969 }
2970 
2971 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2972 			  u16 len)
2973 {
2974 	struct mgmt_cp_set_local_name *cp = data;
2975 	struct pending_cmd *cmd;
2976 	struct hci_request req;
2977 	int err;
2978 
2979 	BT_DBG("");
2980 
2981 	hci_dev_lock(hdev);
2982 
2983 	/* If the old values are the same as the new ones, just return a
2984 	 * direct command complete event.
2985 	 */
2986 	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2987 	    !memcmp(hdev->short_name, cp->short_name,
2988 		    sizeof(hdev->short_name))) {
2989 		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2990 				   data, len);
2991 		goto failed;
2992 	}
2993 
2994 	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2995 
2996 	if (!hdev_is_powered(hdev)) {
2997 		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2998 
2999 		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3000 				   data, len);
3001 		if (err < 0)
3002 			goto failed;
3003 
3004 		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3005 				 sk);
3006 
3007 		goto failed;
3008 	}
3009 
3010 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3011 	if (!cmd) {
3012 		err = -ENOMEM;
3013 		goto failed;
3014 	}
3015 
3016 	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3017 
3018 	hci_req_init(&req, hdev);
3019 
3020 	if (lmp_bredr_capable(hdev)) {
3021 		update_name(&req);
3022 		update_eir(&req);
3023 	}
3024 
3025 	/* The name is stored in the scan response data, so there is
3026 	 * no need to update the advertising data here.
3027 	 */
3028 	if (lmp_le_capable(hdev))
3029 		update_scan_rsp_data(&req);
3030 
3031 	err = hci_req_run(&req, set_name_complete);
3032 	if (err < 0)
3033 		mgmt_pending_remove(cmd);
3034 
3035 failed:
3036 	hci_dev_unlock(hdev);
3037 	return err;
3038 }
3039 
3040 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3041 			       void *data, u16 data_len)
3042 {
3043 	struct pending_cmd *cmd;
3044 	int err;
3045 
3046 	BT_DBG("%s", hdev->name);
3047 
3048 	hci_dev_lock(hdev);
3049 
3050 	if (!hdev_is_powered(hdev)) {
3051 		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3052 				 MGMT_STATUS_NOT_POWERED);
3053 		goto unlock;
3054 	}
3055 
3056 	if (!lmp_ssp_capable(hdev)) {
3057 		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3058 				 MGMT_STATUS_NOT_SUPPORTED);
3059 		goto unlock;
3060 	}
3061 
3062 	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3063 		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3064 				 MGMT_STATUS_BUSY);
3065 		goto unlock;
3066 	}
3067 
3068 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3069 	if (!cmd) {
3070 		err = -ENOMEM;
3071 		goto unlock;
3072 	}
3073 
3074 	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3075 	if (err < 0)
3076 		mgmt_pending_remove(cmd);
3077 
3078 unlock:
3079 	hci_dev_unlock(hdev);
3080 	return err;
3081 }
3082 
3083 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3084 			       void *data, u16 len)
3085 {
3086 	struct mgmt_cp_add_remote_oob_data *cp = data;
3087 	u8 status;
3088 	int err;
3089 
3090 	BT_DBG("%s", hdev->name);
3091 
3092 	hci_dev_lock(hdev);
3093 
3094 	err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
3095 				      cp->randomizer);
3096 	if (err < 0)
3097 		status = MGMT_STATUS_FAILED;
3098 	else
3099 		status = MGMT_STATUS_SUCCESS;
3100 
3101 	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3102 			   &cp->addr, sizeof(cp->addr));
3103 
3104 	hci_dev_unlock(hdev);
3105 	return err;
3106 }
3107 
3108 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3109 				  void *data, u16 len)
3110 {
3111 	struct mgmt_cp_remove_remote_oob_data *cp = data;
3112 	u8 status;
3113 	int err;
3114 
3115 	BT_DBG("%s", hdev->name);
3116 
3117 	hci_dev_lock(hdev);
3118 
3119 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3120 	if (err < 0)
3121 		status = MGMT_STATUS_INVALID_PARAMS;
3122 	else
3123 		status = MGMT_STATUS_SUCCESS;
3124 
3125 	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3126 			   status, &cp->addr, sizeof(cp->addr));
3127 
3128 	hci_dev_unlock(hdev);
3129 	return err;
3130 }
3131 
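/* Fail any pending Start Discovery command and reset the discovery state
 * back to stopped.
 */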
3132 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3133 {
3134 	struct pending_cmd *cmd;
3135 	u8 type;
3136 	int err;
3137 
3138 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3139 
3140 	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3141 	if (!cmd)
3142 		return -ENOENT;
3143 
3144 	type = hdev->discovery.type;
3145 
3146 	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3147 			   &type, sizeof(type));
3148 	mgmt_pending_remove(cmd);
3149 
3150 	return err;
3151 }
3152 
3153 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3154 {
3155 	BT_DBG("status %d", status);
3156 
3157 	if (status) {
3158 		hci_dev_lock(hdev);
3159 		mgmt_start_discovery_failed(hdev, status);
3160 		hci_dev_unlock(hdev);
3161 		return;
3162 	}
3163 
3164 	hci_dev_lock(hdev);
3165 	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3166 	hci_dev_unlock(hdev);
3167 
3168 	switch (hdev->discovery.type) {
3169 	case DISCOV_TYPE_LE:
3170 		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3171 				   DISCOV_LE_TIMEOUT);
3172 		break;
3173 
3174 	case DISCOV_TYPE_INTERLEAVED:
3175 		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3176 				   DISCOV_INTERLEAVED_TIMEOUT);
3177 		break;
3178 
3179 	case DISCOV_TYPE_BREDR:
3180 		break;
3181 
3182 	default:
3183 		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3184 	}
3185 }
3186 
3187 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3188 			   void *data, u16 len)
3189 {
3190 	struct mgmt_cp_start_discovery *cp = data;
3191 	struct pending_cmd *cmd;
3192 	struct hci_cp_le_set_scan_param param_cp;
3193 	struct hci_cp_le_set_scan_enable enable_cp;
3194 	struct hci_cp_inquiry inq_cp;
3195 	struct hci_request req;
3196 	/* General inquiry access code (GIAC) */
3197 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
3198 	u8 status;
3199 	int err;
3200 
3201 	BT_DBG("%s", hdev->name);
3202 
3203 	hci_dev_lock(hdev);
3204 
3205 	if (!hdev_is_powered(hdev)) {
3206 		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3207 				 MGMT_STATUS_NOT_POWERED);
3208 		goto failed;
3209 	}
3210 
3211 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3212 		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3213 				 MGMT_STATUS_BUSY);
3214 		goto failed;
3215 	}
3216 
3217 	if (hdev->discovery.state != DISCOVERY_STOPPED) {
3218 		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3219 				 MGMT_STATUS_BUSY);
3220 		goto failed;
3221 	}
3222 
3223 	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3224 	if (!cmd) {
3225 		err = -ENOMEM;
3226 		goto failed;
3227 	}
3228 
3229 	hdev->discovery.type = cp->type;
3230 
3231 	hci_req_init(&req, hdev);
3232 
3233 	switch (hdev->discovery.type) {
3234 	case DISCOV_TYPE_BREDR:
3235 		status = mgmt_bredr_support(hdev);
3236 		if (status) {
3237 			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3238 					 status);
3239 			mgmt_pending_remove(cmd);
3240 			goto failed;
3241 		}
3242 
3243 		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3244 			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3245 					 MGMT_STATUS_BUSY);
3246 			mgmt_pending_remove(cmd);
3247 			goto failed;
3248 		}
3249 
3250 		hci_inquiry_cache_flush(hdev);
3251 
3252 		memset(&inq_cp, 0, sizeof(inq_cp));
3253 		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3254 		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3255 		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3256 		break;
3257 
3258 	case DISCOV_TYPE_LE:
3259 	case DISCOV_TYPE_INTERLEAVED:
3260 		status = mgmt_le_support(hdev);
3261 		if (status) {
3262 			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3263 					 status);
3264 			mgmt_pending_remove(cmd);
3265 			goto failed;
3266 		}
3267 
3268 		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3269 		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3270 			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3271 					 MGMT_STATUS_NOT_SUPPORTED);
3272 			mgmt_pending_remove(cmd);
3273 			goto failed;
3274 		}
3275 
3276 		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3277 			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3278 					 MGMT_STATUS_REJECTED);
3279 			mgmt_pending_remove(cmd);
3280 			goto failed;
3281 		}
3282 
3283 		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
3284 			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3285 					 MGMT_STATUS_BUSY);
3286 			mgmt_pending_remove(cmd);
3287 			goto failed;
3288 		}
3289 
3290 		memset(&param_cp, 0, sizeof(param_cp));
3291 		param_cp.type = LE_SCAN_ACTIVE;
3292 		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3293 		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3294 		param_cp.own_address_type = hdev->own_addr_type;
3295 		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3296 			    &param_cp);
3297 
3298 		memset(&enable_cp, 0, sizeof(enable_cp));
3299 		enable_cp.enable = LE_SCAN_ENABLE;
3300 		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3301 		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3302 			    &enable_cp);
3303 		break;
3304 
3305 	default:
3306 		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3307 				 MGMT_STATUS_INVALID_PARAMS);
3308 		mgmt_pending_remove(cmd);
3309 		goto failed;
3310 	}
3311 
3312 	err = hci_req_run(&req, start_discovery_complete);
3313 	if (err < 0)
3314 		mgmt_pending_remove(cmd);
3315 	else
3316 		hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3317 
3318 failed:
3319 	hci_dev_unlock(hdev);
3320 	return err;
3321 }
3322 
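/* Complete any pending Stop Discovery command with the given HCI status. */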
3323 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3324 {
3325 	struct pending_cmd *cmd;
3326 	int err;
3327 
3328 	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3329 	if (!cmd)
3330 		return -ENOENT;
3331 
3332 	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3333 			   &hdev->discovery.type, sizeof(hdev->discovery.type));
3334 	mgmt_pending_remove(cmd);
3335 
3336 	return err;
3337 }
3338 
3339 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3340 {
3341 	BT_DBG("status %d", status);
3342 
3343 	hci_dev_lock(hdev);
3344 
3345 	if (status) {
3346 		mgmt_stop_discovery_failed(hdev, status);
3347 		goto unlock;
3348 	}
3349 
3350 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3351 
3352 unlock:
3353 	hci_dev_unlock(hdev);
3354 }
3355 
3356 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3357 			  u16 len)
3358 {
3359 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
3360 	struct pending_cmd *cmd;
3361 	struct hci_cp_remote_name_req_cancel cp;
3362 	struct inquiry_entry *e;
3363 	struct hci_request req;
3364 	struct hci_cp_le_set_scan_enable enable_cp;
3365 	int err;
3366 
3367 	BT_DBG("%s", hdev->name);
3368 
3369 	hci_dev_lock(hdev);
3370 
3371 	if (!hci_discovery_active(hdev)) {
3372 		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3373 				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
3374 				   sizeof(mgmt_cp->type));
3375 		goto unlock;
3376 	}
3377 
3378 	if (hdev->discovery.type != mgmt_cp->type) {
3379 		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3380 				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3381 				   sizeof(mgmt_cp->type));
3382 		goto unlock;
3383 	}
3384 
3385 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3386 	if (!cmd) {
3387 		err = -ENOMEM;
3388 		goto unlock;
3389 	}
3390 
3391 	hci_req_init(&req, hdev);
3392 
3393 	switch (hdev->discovery.state) {
3394 	case DISCOVERY_FINDING:
3395 		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3396 			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3397 		} else {
3398 			cancel_delayed_work(&hdev->le_scan_disable);
3399 
3400 			memset(&enable_cp, 0, sizeof(enable_cp));
3401 			enable_cp.enable = LE_SCAN_DISABLE;
3402 			hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
3403 				    sizeof(enable_cp), &enable_cp);
3404 		}
3405 
3406 		break;
3407 
3408 	case DISCOVERY_RESOLVING:
3409 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3410 						     NAME_PENDING);
3411 		if (!e) {
3412 			mgmt_pending_remove(cmd);
3413 			err = cmd_complete(sk, hdev->id,
3414 					   MGMT_OP_STOP_DISCOVERY, 0,
3415 					   &mgmt_cp->type,
3416 					   sizeof(mgmt_cp->type));
3417 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3418 			goto unlock;
3419 		}
3420 
3421 		bacpy(&cp.bdaddr, &e->data.bdaddr);
3422 		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3423 			    &cp);
3424 
3425 		break;
3426 
3427 	default:
3428 		BT_DBG("unknown discovery state %u", hdev->discovery.state);
3429 
3430 		mgmt_pending_remove(cmd);
3431 		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3432 				   MGMT_STATUS_FAILED, &mgmt_cp->type,
3433 				   sizeof(mgmt_cp->type));
3434 		goto unlock;
3435 	}
3436 
3437 	err = hci_req_run(&req, stop_discovery_complete);
3438 	if (err < 0)
3439 		mgmt_pending_remove(cmd);
3440 	else
3441 		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3442 
3443 unlock:
3444 	hci_dev_unlock(hdev);
3445 	return err;
3446 }
3447 
3448 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3449 			u16 len)
3450 {
3451 	struct mgmt_cp_confirm_name *cp = data;
3452 	struct inquiry_entry *e;
3453 	int err;
3454 
3455 	BT_DBG("%s", hdev->name);
3456 
3457 	hci_dev_lock(hdev);
3458 
3459 	if (!hci_discovery_active(hdev)) {
3460 		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3461 				 MGMT_STATUS_FAILED);
3462 		goto failed;
3463 	}
3464 
3465 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3466 	if (!e) {
3467 		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3468 				 MGMT_STATUS_INVALID_PARAMS);
3469 		goto failed;
3470 	}
3471 
3472 	if (cp->name_known) {
3473 		e->name_state = NAME_KNOWN;
3474 		list_del(&e->list);
3475 	} else {
3476 		e->name_state = NAME_NEEDED;
3477 		hci_inquiry_cache_update_resolve(hdev, e);
3478 	}
3479 
3480 	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3481 			   sizeof(cp->addr));
3482 
3483 failed:
3484 	hci_dev_unlock(hdev);
3485 	return err;
3486 }
3487 
3488 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3489 			u16 len)
3490 {
3491 	struct mgmt_cp_block_device *cp = data;
3492 	u8 status;
3493 	int err;
3494 
3495 	BT_DBG("%s", hdev->name);
3496 
3497 	if (!bdaddr_type_is_valid(cp->addr.type))
3498 		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3499 				    MGMT_STATUS_INVALID_PARAMS,
3500 				    &cp->addr, sizeof(cp->addr));
3501 
3502 	hci_dev_lock(hdev);
3503 
3504 	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3505 	if (err < 0)
3506 		status = MGMT_STATUS_FAILED;
3507 	else
3508 		status = MGMT_STATUS_SUCCESS;
3509 
3510 	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3511 			   &cp->addr, sizeof(cp->addr));
3512 
3513 	hci_dev_unlock(hdev);
3514 
3515 	return err;
3516 }
3517 
3518 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3519 			  u16 len)
3520 {
3521 	struct mgmt_cp_unblock_device *cp = data;
3522 	u8 status;
3523 	int err;
3524 
3525 	BT_DBG("%s", hdev->name);
3526 
3527 	if (!bdaddr_type_is_valid(cp->addr.type))
3528 		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3529 				    MGMT_STATUS_INVALID_PARAMS,
3530 				    &cp->addr, sizeof(cp->addr));
3531 
3532 	hci_dev_lock(hdev);
3533 
3534 	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3535 	if (err < 0)
3536 		status = MGMT_STATUS_INVALID_PARAMS;
3537 	else
3538 		status = MGMT_STATUS_SUCCESS;
3539 
3540 	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3541 			   &cp->addr, sizeof(cp->addr));
3542 
3543 	hci_dev_unlock(hdev);
3544 
3545 	return err;
3546 }
3547 
3548 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3549 			 u16 len)
3550 {
3551 	struct mgmt_cp_set_device_id *cp = data;
3552 	struct hci_request req;
3553 	int err;
3554 	__u16 source;
3555 
3556 	BT_DBG("%s", hdev->name);
3557 
3558 	source = __le16_to_cpu(cp->source);
3559 
3560 	if (source > 0x0002)
3561 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3562 				  MGMT_STATUS_INVALID_PARAMS);
3563 
3564 	hci_dev_lock(hdev);
3565 
3566 	hdev->devid_source = source;
3567 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3568 	hdev->devid_product = __le16_to_cpu(cp->product);
3569 	hdev->devid_version = __le16_to_cpu(cp->version);
3570 
3571 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3572 
3573 	hci_req_init(&req, hdev);
3574 	update_eir(&req);
3575 	hci_req_run(&req, NULL);
3576 
3577 	hci_dev_unlock(hdev);
3578 
3579 	return err;
3580 }
3581 
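/* Respond to all pending Set Advertising commands once the HCI request
 * toggling advertising has completed.
 */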
3582 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3583 {
3584 	struct cmd_lookup match = { NULL, hdev };
3585 
3586 	if (status) {
3587 		u8 mgmt_err = mgmt_status(status);
3588 
3589 		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3590 				     cmd_status_rsp, &mgmt_err);
3591 		return;
3592 	}
3593 
3594 	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3595 			     &match);
3596 
3597 	new_settings(hdev, match.sk);
3598 
3599 	if (match.sk)
3600 		sock_put(match.sk);
3601 }
3602 
3603 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3604 			   u16 len)
3605 {
3606 	struct mgmt_mode *cp = data;
3607 	struct pending_cmd *cmd;
3608 	struct hci_request req;
3609 	u8 val, enabled, status;
3610 	int err;
3611 
3612 	BT_DBG("request for %s", hdev->name);
3613 
3614 	status = mgmt_le_support(hdev);
3615 	if (status)
3616 		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3617 				  status);
3618 
3619 	if (cp->val != 0x00 && cp->val != 0x01)
3620 		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3621 				  MGMT_STATUS_INVALID_PARAMS);
3622 
3623 	hci_dev_lock(hdev);
3624 
3625 	val = !!cp->val;
3626 	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3627 
3628 	/* The following conditions mean that we should not do any
3629 	 * HCI communication but instead directly send a mgmt
3630 	 * response to user space (after toggling the flag if
3631 	 * necessary).
3632 	 */
3633 	if (!hdev_is_powered(hdev) || val == enabled ||
3634 	    hci_conn_num(hdev, LE_LINK) > 0) {
3635 		bool changed = false;
3636 
3637 		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3638 			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3639 			changed = true;
3640 		}
3641 
3642 		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3643 		if (err < 0)
3644 			goto unlock;
3645 
3646 		if (changed)
3647 			err = new_settings(hdev, sk);
3648 
3649 		goto unlock;
3650 	}
3651 
3652 	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3653 	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3654 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3655 				 MGMT_STATUS_BUSY);
3656 		goto unlock;
3657 	}
3658 
3659 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3660 	if (!cmd) {
3661 		err = -ENOMEM;
3662 		goto unlock;
3663 	}
3664 
3665 	hci_req_init(&req, hdev);
3666 
3667 	if (val)
3668 		enable_advertising(&req);
3669 	else
3670 		disable_advertising(&req);
3671 
3672 	err = hci_req_run(&req, set_advertising_complete);
3673 	if (err < 0)
3674 		mgmt_pending_remove(cmd);
3675 
3676 unlock:
3677 	hci_dev_unlock(hdev);
3678 	return err;
3679 }
3680 
3681 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3682 			      void *data, u16 len)
3683 {
3684 	struct mgmt_cp_set_static_address *cp = data;
3685 	int err;
3686 
3687 	BT_DBG("%s", hdev->name);
3688 
3689 	if (!lmp_le_capable(hdev))
3690 		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3691 				  MGMT_STATUS_NOT_SUPPORTED);
3692 
3693 	if (hdev_is_powered(hdev))
3694 		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3695 				  MGMT_STATUS_REJECTED);
3696 
3697 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3698 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3699 			return cmd_status(sk, hdev->id,
3700 					  MGMT_OP_SET_STATIC_ADDRESS,
3701 					  MGMT_STATUS_INVALID_PARAMS);
3702 
3703 		/* Two most significant bits shall be set */
3704 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3705 			return cmd_status(sk, hdev->id,
3706 					  MGMT_OP_SET_STATIC_ADDRESS,
3707 					  MGMT_STATUS_INVALID_PARAMS);
3708 	}
3709 
3710 	hci_dev_lock(hdev);
3711 
3712 	bacpy(&hdev->static_addr, &cp->bdaddr);
3713 
3714 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3715 
3716 	hci_dev_unlock(hdev);
3717 
3718 	return err;
3719 }
3720 
3721 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3722 			   void *data, u16 len)
3723 {
3724 	struct mgmt_cp_set_scan_params *cp = data;
3725 	__u16 interval, window;
3726 	int err;
3727 
3728 	BT_DBG("%s", hdev->name);
3729 
3730 	if (!lmp_le_capable(hdev))
3731 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3732 				  MGMT_STATUS_NOT_SUPPORTED);
3733 
3734 	interval = __le16_to_cpu(cp->interval);
3735 
3736 	if (interval < 0x0004 || interval > 0x4000)
3737 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3738 				  MGMT_STATUS_INVALID_PARAMS);
3739 
3740 	window = __le16_to_cpu(cp->window);
3741 
3742 	if (window < 0x0004 || window > 0x4000)
3743 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3744 				  MGMT_STATUS_INVALID_PARAMS);
3745 
3746 	if (window > interval)
3747 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3748 				  MGMT_STATUS_INVALID_PARAMS);
3749 
3750 	hci_dev_lock(hdev);
3751 
3752 	hdev->le_scan_interval = interval;
3753 	hdev->le_scan_window = window;
3754 
3755 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3756 
3757 	hci_dev_unlock(hdev);
3758 
3759 	return err;
3760 }
3761 
3762 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3763 {
3764 	struct pending_cmd *cmd;
3765 
3766 	BT_DBG("status 0x%02x", status);
3767 
3768 	hci_dev_lock(hdev);
3769 
3770 	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3771 	if (!cmd)
3772 		goto unlock;
3773 
3774 	if (status) {
3775 		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3776 			   mgmt_status(status));
3777 	} else {
3778 		struct mgmt_mode *cp = cmd->param;
3779 
3780 		if (cp->val)
3781 			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3782 		else
3783 			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3784 
3785 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3786 		new_settings(hdev, cmd->sk);
3787 	}
3788 
3789 	mgmt_pending_remove(cmd);
3790 
3791 unlock:
3792 	hci_dev_unlock(hdev);
3793 }
3794 
3795 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3796 				void *data, u16 len)
3797 {
3798 	struct mgmt_mode *cp = data;
3799 	struct pending_cmd *cmd;
3800 	struct hci_request req;
3801 	int err;
3802 
3803 	BT_DBG("%s", hdev->name);
3804 
3805 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
3806 	    hdev->hci_ver < BLUETOOTH_VER_1_2)
3807 		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3808 				  MGMT_STATUS_NOT_SUPPORTED);
3809 
3810 	if (cp->val != 0x00 && cp->val != 0x01)
3811 		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3812 				  MGMT_STATUS_INVALID_PARAMS);
3813 
3814 	if (!hdev_is_powered(hdev))
3815 		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3816 				  MGMT_STATUS_NOT_POWERED);
3817 
3818 	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3819 		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3820 				  MGMT_STATUS_REJECTED);
3821 
3822 	hci_dev_lock(hdev);
3823 
3824 	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3825 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3826 				 MGMT_STATUS_BUSY);
3827 		goto unlock;
3828 	}
3829 
3830 	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3831 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3832 					hdev);
3833 		goto unlock;
3834 	}
3835 
3836 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3837 			       data, len);
3838 	if (!cmd) {
3839 		err = -ENOMEM;
3840 		goto unlock;
3841 	}
3842 
3843 	hci_req_init(&req, hdev);
3844 
3845 	write_fast_connectable(&req, cp->val);
3846 
3847 	err = hci_req_run(&req, fast_connectable_complete);
3848 	if (err < 0) {
3849 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3850 				 MGMT_STATUS_FAILED);
3851 		mgmt_pending_remove(cmd);
3852 	}
3853 
3854 unlock:
3855 	hci_dev_unlock(hdev);
3856 
3857 	return err;
3858 }
3859 
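/* Re-enable page scanning (and inquiry scanning when discoverable) after
 * making sure any fast connectable page scan parameters are reverted.
 */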
3860 static void set_bredr_scan(struct hci_request *req)
3861 {
3862 	struct hci_dev *hdev = req->hdev;
3863 	u8 scan = 0;
3864 
3865 	/* Ensure that fast connectable is disabled. This function will
3866 	 * not do anything if the page scan parameters are already what
3867 	 * they should be.
3868 	 */
3869 	write_fast_connectable(req, false);
3870 
3871 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3872 		scan |= SCAN_PAGE;
3873 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3874 		scan |= SCAN_INQUIRY;
3875 
3876 	if (scan)
3877 		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3878 }
3879 
3880 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
3881 {
3882 	struct pending_cmd *cmd;
3883 
3884 	BT_DBG("status 0x%02x", status);
3885 
3886 	hci_dev_lock(hdev);
3887 
3888 	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
3889 	if (!cmd)
3890 		goto unlock;
3891 
3892 	if (status) {
3893 		u8 mgmt_err = mgmt_status(status);
3894 
3895 		/* We need to restore the flag to its previous state since
3896 		 * the related HCI commands failed.
3897 		 */
3898 		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3899 
3900 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
3901 	} else {
3902 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
3903 		new_settings(hdev, cmd->sk);
3904 	}
3905 
3906 	mgmt_pending_remove(cmd);
3907 
3908 unlock:
3909 	hci_dev_unlock(hdev);
3910 }
3911 
3912 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
3913 {
3914 	struct mgmt_mode *cp = data;
3915 	struct pending_cmd *cmd;
3916 	struct hci_request req;
3917 	int err;
3918 
3919 	BT_DBG("request for %s", hdev->name);
3920 
3921 	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
3922 		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3923 				  MGMT_STATUS_NOT_SUPPORTED);
3924 
3925 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3926 		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3927 				  MGMT_STATUS_REJECTED);
3928 
3929 	if (cp->val != 0x00 && cp->val != 0x01)
3930 		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3931 				  MGMT_STATUS_INVALID_PARAMS);
3932 
3933 	hci_dev_lock(hdev);
3934 
3935 	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3936 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3937 		goto unlock;
3938 	}
3939 
3940 	if (!hdev_is_powered(hdev)) {
3941 		if (!cp->val) {
3942 			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
3943 			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
3944 			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3945 			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3946 			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
3947 		}
3948 
3949 		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3950 
3951 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3952 		if (err < 0)
3953 			goto unlock;
3954 
3955 		err = new_settings(hdev, sk);
3956 		goto unlock;
3957 	}
3958 
3959 	/* Reject disabling when powered on */
3960 	if (!cp->val) {
3961 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3962 				 MGMT_STATUS_REJECTED);
3963 		goto unlock;
3964 	}
3965 
3966 	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
3967 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3968 				 MGMT_STATUS_BUSY);
3969 		goto unlock;
3970 	}
3971 
3972 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
3973 	if (!cmd) {
3974 		err = -ENOMEM;
3975 		goto unlock;
3976 	}
3977 
3978 	/* We need to flip the bit already here so that update_adv_data
3979 	 * generates the correct flags.
3980 	 */
3981 	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3982 
3983 	hci_req_init(&req, hdev);
3984 
3985 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3986 		set_bredr_scan(&req);
3987 
3988 	/* Since only the advertising data flags will change, there
3989 	 * is no need to update the scan response data.
3990 	 */
3991 	update_adv_data(&req);
3992 
3993 	err = hci_req_run(&req, set_bredr_complete);
3994 	if (err < 0)
3995 		mgmt_pending_remove(cmd);
3996 
3997 unlock:
3998 	hci_dev_unlock(hdev);
3999 	return err;
4000 }
4001 
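/* Basic sanity checks on a single Long Term Key entry from user space. */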
4002 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4003 {
4004 	if (key->authenticated != 0x00 && key->authenticated != 0x01)
4005 		return false;
4006 	if (key->master != 0x00 && key->master != 0x01)
4007 		return false;
4008 	if (!bdaddr_type_is_le(key->addr.type))
4009 		return false;
4010 	return true;
4011 }
4012 
4013 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4014 			       void *cp_data, u16 len)
4015 {
4016 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
4017 	u16 key_count, expected_len;
4018 	int i, err;
4019 
4020 	BT_DBG("request for %s", hdev->name);
4021 
4022 	if (!lmp_le_capable(hdev))
4023 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4024 				  MGMT_STATUS_NOT_SUPPORTED);
4025 
4026 	key_count = __le16_to_cpu(cp->key_count);
4027 
4028 	expected_len = sizeof(*cp) + key_count *
4029 					sizeof(struct mgmt_ltk_info);
4030 	if (expected_len != len) {
4031 		BT_ERR("load_keys: expected %u bytes, got %u bytes",
4032 		       expected_len, len);
4033 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4034 				  MGMT_STATUS_INVALID_PARAMS);
4035 	}
4036 
4037 	BT_DBG("%s key_count %u", hdev->name, key_count);
4038 
4039 	for (i = 0; i < key_count; i++) {
4040 		struct mgmt_ltk_info *key = &cp->keys[i];
4041 
4042 		if (!ltk_is_valid(key))
4043 			return cmd_status(sk, hdev->id,
4044 					  MGMT_OP_LOAD_LONG_TERM_KEYS,
4045 					  MGMT_STATUS_INVALID_PARAMS);
4046 	}
4047 
4048 	hci_dev_lock(hdev);
4049 
4050 	hci_smp_ltks_clear(hdev);
4051 
4052 	for (i = 0; i < key_count; i++) {
4053 		struct mgmt_ltk_info *key = &cp->keys[i];
4054 		u8 type, addr_type;
4055 
4056 		if (key->addr.type == BDADDR_LE_PUBLIC)
4057 			addr_type = ADDR_LE_DEV_PUBLIC;
4058 		else
4059 			addr_type = ADDR_LE_DEV_RANDOM;
4060 
4061 		if (key->master)
4062 			type = HCI_SMP_LTK;
4063 		else
4064 			type = HCI_SMP_LTK_SLAVE;
4065 
4066 		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
4067 			    type, 0, key->authenticated, key->val,
4068 			    key->enc_size, key->ediv, key->rand);
4069 	}
4070 
4071 	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4072 			   NULL, 0);
4073 
4074 	hci_dev_unlock(hdev);
4075 
4076 	return err;
4077 }
4078 
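/* Command dispatch table, indexed by mgmt opcode. data_len is the
 * exact expected parameter length, or the minimum length for
 * variable-length (var_len) commands.
 */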
4079 static const struct mgmt_handler {
4080 	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4081 		     u16 data_len);
4082 	bool var_len;
4083 	size_t data_len;
4084 } mgmt_handlers[] = {
4085 	{ NULL }, /* 0x0000 (no command) */
4086 	{ read_version,           false, MGMT_READ_VERSION_SIZE },
4087 	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
4088 	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
4089 	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
4090 	{ set_powered,            false, MGMT_SETTING_SIZE },
4091 	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
4092 	{ set_connectable,        false, MGMT_SETTING_SIZE },
4093 	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
4094 	{ set_pairable,           false, MGMT_SETTING_SIZE },
4095 	{ set_link_security,      false, MGMT_SETTING_SIZE },
4096 	{ set_ssp,                false, MGMT_SETTING_SIZE },
4097 	{ set_hs,                 false, MGMT_SETTING_SIZE },
4098 	{ set_le,                 false, MGMT_SETTING_SIZE },
4099 	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
4100 	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
4101 	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
4102 	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
4103 	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
4104 	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4105 	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
4106 	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
4107 	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
4108 	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4109 	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
4110 	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
4111 	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4112 	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
4113 	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
4114 	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4115 	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
4116 	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4117 	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4118 	{ add_remote_oob_data,    false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4119 	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4120 	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
4121 	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
4122 	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
4123 	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
4124 	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
4125 	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
4126 	{ set_advertising,        false, MGMT_SETTING_SIZE },
4127 	{ set_bredr,              false, MGMT_SETTING_SIZE },
4128 	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
4129 	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
4130 };
4131 
4132 
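/* Entry point for management commands received on an HCI control
 * socket: copy the message, validate the header and parameter length,
 * resolve the controller index and dispatch to the matching handler.
 * Controllers still in setup or bound to a user channel are rejected
 * with an invalid index status.
 */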
4133 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4134 {
4135 	void *buf;
4136 	u8 *cp;
4137 	struct mgmt_hdr *hdr;
4138 	u16 opcode, index, len;
4139 	struct hci_dev *hdev = NULL;
4140 	const struct mgmt_handler *handler;
4141 	int err;
4142 
4143 	BT_DBG("got %zu bytes", msglen);
4144 
4145 	if (msglen < sizeof(*hdr))
4146 		return -EINVAL;
4147 
4148 	buf = kmalloc(msglen, GFP_KERNEL);
4149 	if (!buf)
4150 		return -ENOMEM;
4151 
4152 	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
4153 		err = -EFAULT;
4154 		goto done;
4155 	}
4156 
4157 	hdr = buf;
4158 	opcode = __le16_to_cpu(hdr->opcode);
4159 	index = __le16_to_cpu(hdr->index);
4160 	len = __le16_to_cpu(hdr->len);
4161 
4162 	if (len != msglen - sizeof(*hdr)) {
4163 		err = -EINVAL;
4164 		goto done;
4165 	}
4166 
4167 	if (index != MGMT_INDEX_NONE) {
4168 		hdev = hci_dev_get(index);
4169 		if (!hdev) {
4170 			err = cmd_status(sk, index, opcode,
4171 					 MGMT_STATUS_INVALID_INDEX);
4172 			goto done;
4173 		}
4174 
4175 		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4176 		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4177 			err = cmd_status(sk, index, opcode,
4178 					 MGMT_STATUS_INVALID_INDEX);
4179 			goto done;
4180 		}
4181 	}
4182 
4183 	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4184 	    mgmt_handlers[opcode].func == NULL) {
4185 		BT_DBG("Unknown op %u", opcode);
4186 		err = cmd_status(sk, index, opcode,
4187 				 MGMT_STATUS_UNKNOWN_COMMAND);
4188 		goto done;
4189 	}
4190 
4191 	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4192 	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4193 		err = cmd_status(sk, index, opcode,
4194 				 MGMT_STATUS_INVALID_INDEX);
4195 		goto done;
4196 	}
4197 
4198 	handler = &mgmt_handlers[opcode];
4199 
4200 	if ((handler->var_len && len < handler->data_len) ||
4201 	    (!handler->var_len && len != handler->data_len)) {
4202 		err = cmd_status(sk, index, opcode,
4203 				 MGMT_STATUS_INVALID_PARAMS);
4204 		goto done;
4205 	}
4206 
4207 	if (hdev)
4208 		mgmt_init_hdev(sk, hdev);
4209 
4210 	cp = buf + sizeof(*hdr);
4211 
4212 	err = handler->func(sk, hdev, cp, len);
4213 	if (err < 0)
4214 		goto done;
4215 
4216 	err = msglen;
4217 
4218 done:
4219 	if (hdev)
4220 		hci_dev_put(hdev);
4221 
4222 	kfree(buf);
4223 	return err;
4224 }
4225 
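/* Emit an Index Added event for the controller; controllers whose
 * dev_type is not HCI_BREDR are not exposed over the management
 * interface.
 */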
4226 void mgmt_index_added(struct hci_dev *hdev)
4227 {
4228 	if (hdev->dev_type != HCI_BREDR)
4229 		return;
4230 
4231 	mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4232 }
4233 
4234 void mgmt_index_removed(struct hci_dev *hdev)
4235 {
4236 	u8 status = MGMT_STATUS_INVALID_INDEX;
4237 
4238 	if (hdev->dev_type != HCI_BREDR)
4239 		return;
4240 
4241 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4242 
4243 	mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4244 }
4245 
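/* HCI request completion callback for the power-on request: respond
 * to pending Set Powered commands and emit a New Settings event.
 */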
4246 static void powered_complete(struct hci_dev *hdev, u8 status)
4247 {
4248 	struct cmd_lookup match = { NULL, hdev };
4249 
4250 	BT_DBG("status 0x%02x", status);
4251 
4252 	hci_dev_lock(hdev);
4253 
4254 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4255 
4256 	new_settings(hdev, match.sk);
4257 
4258 	hci_dev_unlock(hdev);
4259 
4260 	if (match.sk)
4261 		sock_put(match.sk);
4262 }
4263 
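/* Build and run an HCI request that brings the controller in line with
 * the current mgmt settings after powering on: SSP, LE host support,
 * static random address, advertising and scan response data, link
 * security, page scan, class of device, local name and EIR.
 */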
4264 static int powered_update_hci(struct hci_dev *hdev)
4265 {
4266 	struct hci_request req;
4267 	u8 link_sec;
4268 
4269 	hci_req_init(&req, hdev);
4270 
4271 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
4272 	    !lmp_host_ssp_capable(hdev)) {
4273 		u8 ssp = 1;
4274 
4275 		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
4276 	}
4277 
4278 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4279 	    lmp_bredr_capable(hdev)) {
4280 		struct hci_cp_write_le_host_supported cp;
4281 
4282 		cp.le = 1;
4283 		cp.simul = lmp_le_br_capable(hdev);
4284 
4285 		/* Check first if we already have the right
4286 		 * host state (host features set)
4287 		 */
4288 		if (cp.le != lmp_host_le_capable(hdev) ||
4289 		    cp.simul != lmp_host_le_br_capable(hdev))
4290 			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4291 				    sizeof(cp), &cp);
4292 	}
4293 
4294 	if (lmp_le_capable(hdev)) {
4295 		/* Set random address to static address if configured */
4296 		if (bacmp(&hdev->static_addr, BDADDR_ANY))
4297 			hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
4298 				    &hdev->static_addr);
4299 
4300 		/* Make sure the controller has a good default for
4301 		 * advertising data. This also applies to the case
4302 		 * where BR/EDR was toggled during the AUTO_OFF phase.
4303 		 */
4304 		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
4305 			update_adv_data(&req);
4306 			update_scan_rsp_data(&req);
4307 		}
4308 
4309 		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
4310 			enable_advertising(&req);
4311 	}
4312 
4313 	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4314 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
4315 		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
4316 			    sizeof(link_sec), &link_sec);
4317 
4318 	if (lmp_bredr_capable(hdev)) {
4319 		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
4320 			set_bredr_scan(&req);
4321 		update_class(&req);
4322 		update_name(&req);
4323 		update_eir(&req);
4324 	}
4325 
4326 	return hci_req_run(&req, powered_complete);
4327 }
4328 
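/* Handle a controller power state change. When powering on, the
 * controller is synchronized with the current settings via
 * powered_update_hci() and its completion callback finishes the job.
 * When powering off, pending commands are failed with
 * MGMT_STATUS_NOT_POWERED, a zero class of device is signalled if
 * needed and a New Settings event is emitted.
 */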
4329 int mgmt_powered(struct hci_dev *hdev, u8 powered)
4330 {
4331 	struct cmd_lookup match = { NULL, hdev };
4332 	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
4333 	u8 zero_cod[] = { 0, 0, 0 };
4334 	int err;
4335 
4336 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4337 		return 0;
4338 
4339 	if (powered) {
4340 		if (powered_update_hci(hdev) == 0)
4341 			return 0;
4342 
4343 		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
4344 				     &match);
4345 		goto new_settings;
4346 	}
4347 
4348 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4349 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
4350 
4351 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
4352 		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
4353 			   zero_cod, sizeof(zero_cod), NULL);
4354 
4355 new_settings:
4356 	err = new_settings(hdev, match.sk);
4357 
4358 	if (match.sk)
4359 		sock_put(match.sk);
4360 
4361 	return err;
4362 }
4363 
4364 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4365 {
4366 	struct pending_cmd *cmd;
4367 	u8 status;
4368 
4369 	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4370 	if (!cmd)
4371 		return;
4372 
4373 	if (err == -ERFKILL)
4374 		status = MGMT_STATUS_RFKILLED;
4375 	else
4376 		status = MGMT_STATUS_FAILED;
4377 
4378 	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4379 
4380 	mgmt_pending_remove(cmd);
4381 }
4382 
4383 void mgmt_discoverable_timeout(struct hci_dev *hdev)
4384 {
4385 	struct hci_request req;
4386 
4387 	hci_dev_lock(hdev);
4388 
4389 	/* When the discoverable timeout triggers, just make sure
4390 	 * the limited discoverable flag is cleared. Even in the case
4391 	 * of a timeout triggered from general discoverable, it is
4392 	 * safe to unconditionally clear the flag.
4393 	 */
4394 	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4395 	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4396 
4397 	hci_req_init(&req, hdev);
4398 	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4399 		u8 scan = SCAN_PAGE;
4400 		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
4401 			    sizeof(scan), &scan);
4402 	}
4403 	update_class(&req);
4404 	update_adv_data(&req);
4405 	hci_req_run(&req, NULL);
4406 
4407 	hdev->discov_timeout = 0;
4408 
4409 	new_settings(hdev, NULL);
4410 
4411 	hci_dev_unlock(hdev);
4412 }
4413 
4414 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4415 {
4416 	bool changed;
4417 
4418 	/* Nothing needed here if there's a pending command since that
4419 	 * command's request completion callback takes care of everything
4420 	 * necessary.
4421 	 */
4422 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4423 		return;
4424 
4425 	if (discoverable) {
4426 		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4427 	} else {
4428 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4429 		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4430 	}
4431 
4432 	if (changed) {
4433 		struct hci_request req;
4434 
4435 		/* If this change in discoverable was triggered by disabling
4436 		 * connectable, the advertising flags may need to be
4437 		 * updated.
4438 		 */
4439 		hci_req_init(&req, hdev);
4440 		update_adv_data(&req);
4441 		hci_req_run(&req, NULL);
4442 
4443 		new_settings(hdev, NULL);
4444 	}
4445 }
4446 
4447 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4448 {
4449 	bool changed;
4450 
4451 	/* Nothing needed here if there's a pending command since that
4452 	 * command's request completion callback takes care of everything
4453 	 * necessary.
4454 	 */
4455 	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4456 		return;
4457 
4458 	if (connectable)
4459 		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4460 	else
4461 		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4462 
4463 	if (changed)
4464 		new_settings(hdev, NULL);
4465 }
4466 
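/* A Write Scan Enable command failed: report the error to any pending
 * Set Connectable (page scan) and Set Discoverable (inquiry scan)
 * commands.
 */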
4467 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4468 {
4469 	u8 mgmt_err = mgmt_status(status);
4470 
4471 	if (scan & SCAN_PAGE)
4472 		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4473 				     cmd_status_rsp, &mgmt_err);
4474 
4475 	if (scan & SCAN_INQUIRY)
4476 		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4477 				     cmd_status_rsp, &mgmt_err);
4478 }
4479 
4480 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4481 		       bool persistent)
4482 {
4483 	struct mgmt_ev_new_link_key ev;
4484 
4485 	memset(&ev, 0, sizeof(ev));
4486 
4487 	ev.store_hint = persistent;
4488 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4489 	ev.key.addr.type = BDADDR_BREDR;
4490 	ev.key.type = key->type;
4491 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4492 	ev.key.pin_len = key->pin_len;
4493 
4494 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4495 }
4496 
4497 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
4498 {
4499 	struct mgmt_ev_new_long_term_key ev;
4500 
4501 	memset(&ev, 0, sizeof(ev));
4502 
4503 	ev.store_hint = persistent;
4504 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4505 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
4506 	ev.key.authenticated = key->authenticated;
4507 	ev.key.enc_size = key->enc_size;
4508 	ev.key.ediv = key->ediv;
4509 
4510 	if (key->type == HCI_SMP_LTK)
4511 		ev.key.master = 1;
4512 
4513 	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
4514 	memcpy(ev.key.val, key->val, sizeof(key->val));
4515 
4516 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
4517 }
4518 
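/* Append a length/type/data field to an EIR buffer and return the new
 * buffer length. The caller must make sure enough space is available.
 */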
4519 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4520 				  u8 data_len)
4521 {
4522 	eir[eir_len++] = sizeof(type) + data_len;
4523 	eir[eir_len++] = type;
4524 	memcpy(&eir[eir_len], data, data_len);
4525 	eir_len += data_len;
4526 
4527 	return eir_len;
4528 }
4529 
4530 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4531 			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
4532 			   u8 *dev_class)
4533 {
4534 	char buf[512];
4535 	struct mgmt_ev_device_connected *ev = (void *) buf;
4536 	u16 eir_len = 0;
4537 
4538 	bacpy(&ev->addr.bdaddr, bdaddr);
4539 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
4540 
4541 	ev->flags = __cpu_to_le32(flags);
4542 
4543 	if (name_len > 0)
4544 		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
4545 					  name, name_len);
4546 
4547 	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
4548 		eir_len = eir_append_data(ev->eir, eir_len,
4549 					  EIR_CLASS_OF_DEV, dev_class, 3);
4550 
4551 	ev->eir_len = cpu_to_le16(eir_len);
4552 
4553 	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
4554 		    sizeof(*ev) + eir_len, NULL);
4555 }
4556 
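/* Complete a pending Disconnect command and pass its socket (with a
 * reference held) back to the caller via the data pointer.
 */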
4557 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
4558 {
4559 	struct mgmt_cp_disconnect *cp = cmd->param;
4560 	struct sock **sk = data;
4561 	struct mgmt_rp_disconnect rp;
4562 
4563 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4564 	rp.addr.type = cp->addr.type;
4565 
4566 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
4567 		     sizeof(rp));
4568 
4569 	*sk = cmd->sk;
4570 	sock_hold(*sk);
4571 
4572 	mgmt_pending_remove(cmd);
4573 }
4574 
4575 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
4576 {
4577 	struct hci_dev *hdev = data;
4578 	struct mgmt_cp_unpair_device *cp = cmd->param;
4579 	struct mgmt_rp_unpair_device rp;
4580 
4581 	memset(&rp, 0, sizeof(rp));
4582 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4583 	rp.addr.type = cp->addr.type;
4584 
4585 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
4586 
4587 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
4588 
4589 	mgmt_pending_remove(cmd);
4590 }
4591 
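/* Report a disconnection with a Device Disconnected event and complete
 * any pending Disconnect and Unpair Device commands for this
 * controller.
 */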
4592 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
4593 			      u8 link_type, u8 addr_type, u8 reason)
4594 {
4595 	struct mgmt_ev_device_disconnected ev;
4596 	struct sock *sk = NULL;
4597 
4598 	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
4599 
4600 	bacpy(&ev.addr.bdaddr, bdaddr);
4601 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4602 	ev.reason = reason;
4603 
4604 	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
4605 
4606 	if (sk)
4607 		sock_put(sk);
4608 
4609 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4610 			     hdev);
4611 }
4612 
4613 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
4614 			    u8 link_type, u8 addr_type, u8 status)
4615 {
4616 	struct mgmt_rp_disconnect rp;
4617 	struct pending_cmd *cmd;
4618 
4619 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4620 			     hdev);
4621 
4622 	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
4623 	if (!cmd)
4624 		return;
4625 
4626 	bacpy(&rp.addr.bdaddr, bdaddr);
4627 	rp.addr.type = link_to_bdaddr(link_type, addr_type);
4628 
4629 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
4630 		     mgmt_status(status), &rp, sizeof(rp));
4631 
4632 	mgmt_pending_remove(cmd);
4633 }
4634 
4635 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4636 			 u8 addr_type, u8 status)
4637 {
4638 	struct mgmt_ev_connect_failed ev;
4639 
4640 	bacpy(&ev.addr.bdaddr, bdaddr);
4641 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4642 	ev.status = mgmt_status(status);
4643 
4644 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
4645 }
4646 
4647 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
4648 {
4649 	struct mgmt_ev_pin_code_request ev;
4650 
4651 	bacpy(&ev.addr.bdaddr, bdaddr);
4652 	ev.addr.type = BDADDR_BREDR;
4653 	ev.secure = secure;
4654 
4655 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
4656 }
4657 
4658 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4659 				  u8 status)
4660 {
4661 	struct pending_cmd *cmd;
4662 	struct mgmt_rp_pin_code_reply rp;
4663 
4664 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
4665 	if (!cmd)
4666 		return;
4667 
4668 	bacpy(&rp.addr.bdaddr, bdaddr);
4669 	rp.addr.type = BDADDR_BREDR;
4670 
4671 	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
4672 		     mgmt_status(status), &rp, sizeof(rp));
4673 
4674 	mgmt_pending_remove(cmd);
4675 }
4676 
4677 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4678 				      u8 status)
4679 {
4680 	struct pending_cmd *cmd;
4681 	struct mgmt_rp_pin_code_reply rp;
4682 
4683 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
4684 	if (!cmd)
4685 		return;
4686 
4687 	bacpy(&rp.addr.bdaddr, bdaddr);
4688 	rp.addr.type = BDADDR_BREDR;
4689 
4690 	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
4691 		     mgmt_status(status), &rp, sizeof(rp));
4692 
4693 	mgmt_pending_remove(cmd);
4694 }
4695 
4696 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4697 			      u8 link_type, u8 addr_type, __le32 value,
4698 			      u8 confirm_hint)
4699 {
4700 	struct mgmt_ev_user_confirm_request ev;
4701 
4702 	BT_DBG("%s", hdev->name);
4703 
4704 	bacpy(&ev.addr.bdaddr, bdaddr);
4705 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4706 	ev.confirm_hint = confirm_hint;
4707 	ev.value = value;
4708 
4709 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
4710 			  NULL);
4711 }
4712 
4713 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4714 			      u8 link_type, u8 addr_type)
4715 {
4716 	struct mgmt_ev_user_passkey_request ev;
4717 
4718 	BT_DBG("%s", hdev->name);
4719 
4720 	bacpy(&ev.addr.bdaddr, bdaddr);
4721 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4722 
4723 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
4724 			  NULL);
4725 }
4726 
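/* Common helper for completing pending user confirmation and passkey
 * reply commands with the controller-provided status.
 */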
4727 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4728 				      u8 link_type, u8 addr_type, u8 status,
4729 				      u8 opcode)
4730 {
4731 	struct pending_cmd *cmd;
4732 	struct mgmt_rp_user_confirm_reply rp;
4733 	int err;
4734 
4735 	cmd = mgmt_pending_find(opcode, hdev);
4736 	if (!cmd)
4737 		return -ENOENT;
4738 
4739 	bacpy(&rp.addr.bdaddr, bdaddr);
4740 	rp.addr.type = link_to_bdaddr(link_type, addr_type);
4741 	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
4742 			   &rp, sizeof(rp));
4743 
4744 	mgmt_pending_remove(cmd);
4745 
4746 	return err;
4747 }
4748 
4749 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4750 				     u8 link_type, u8 addr_type, u8 status)
4751 {
4752 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4753 					  status, MGMT_OP_USER_CONFIRM_REPLY);
4754 }
4755 
4756 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4757 					 u8 link_type, u8 addr_type, u8 status)
4758 {
4759 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4760 					  status,
4761 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
4762 }
4763 
4764 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4765 				     u8 link_type, u8 addr_type, u8 status)
4766 {
4767 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4768 					  status, MGMT_OP_USER_PASSKEY_REPLY);
4769 }
4770 
4771 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4772 					 u8 link_type, u8 addr_type, u8 status)
4773 {
4774 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4775 					  status,
4776 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
4777 }
4778 
4779 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
4780 			     u8 link_type, u8 addr_type, u32 passkey,
4781 			     u8 entered)
4782 {
4783 	struct mgmt_ev_passkey_notify ev;
4784 
4785 	BT_DBG("%s", hdev->name);
4786 
4787 	bacpy(&ev.addr.bdaddr, bdaddr);
4788 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4789 	ev.passkey = __cpu_to_le32(passkey);
4790 	ev.entered = entered;
4791 
4792 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
4793 }
4794 
4795 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4796 		      u8 addr_type, u8 status)
4797 {
4798 	struct mgmt_ev_auth_failed ev;
4799 
4800 	bacpy(&ev.addr.bdaddr, bdaddr);
4801 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4802 	ev.status = mgmt_status(status);
4803 
4804 	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
4805 }
4806 
4807 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
4808 {
4809 	struct cmd_lookup match = { NULL, hdev };
4810 	bool changed;
4811 
4812 	if (status) {
4813 		u8 mgmt_err = mgmt_status(status);
4814 		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
4815 				     cmd_status_rsp, &mgmt_err);
4816 		return;
4817 	}
4818 
4819 	if (test_bit(HCI_AUTH, &hdev->flags))
4820 		changed = !test_and_set_bit(HCI_LINK_SECURITY,
4821 					    &hdev->dev_flags);
4822 	else
4823 		changed = test_and_clear_bit(HCI_LINK_SECURITY,
4824 					     &hdev->dev_flags);
4825 
4826 	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
4827 			     &match);
4828 
4829 	if (changed)
4830 		new_settings(hdev, match.sk);
4831 
4832 	if (match.sk)
4833 		sock_put(match.sk);
4834 }
4835 
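/* Clear the stored extended inquiry response data and queue a Write
 * EIR command with all-zero data, provided the controller supports
 * extended inquiry.
 */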
4836 static void clear_eir(struct hci_request *req)
4837 {
4838 	struct hci_dev *hdev = req->hdev;
4839 	struct hci_cp_write_eir cp;
4840 
4841 	if (!lmp_ext_inq_capable(hdev))
4842 		return;
4843 
4844 	memset(hdev->eir, 0, sizeof(hdev->eir));
4845 
4846 	memset(&cp, 0, sizeof(cp));
4847 
4848 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
4849 }
4850 
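/* Handle completion of an SSP enable/disable on the controller: update
 * the SSP and HS flags, answer pending Set SSP commands, emit New
 * Settings if the state changed and update or clear the EIR data
 * accordingly.
 */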
4851 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4852 {
4853 	struct cmd_lookup match = { NULL, hdev };
4854 	struct hci_request req;
4855 	bool changed = false;
4856 
4857 	if (status) {
4858 		u8 mgmt_err = mgmt_status(status);
4859 
4860 		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4861 						 &hdev->dev_flags)) {
4862 			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4863 			new_settings(hdev, NULL);
4864 		}
4865 
4866 		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4867 				     &mgmt_err);
4868 		return;
4869 	}
4870 
4871 	if (enable) {
4872 		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4873 	} else {
4874 		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4875 		if (!changed)
4876 			changed = test_and_clear_bit(HCI_HS_ENABLED,
4877 						     &hdev->dev_flags);
4878 		else
4879 			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4880 	}
4881 
4882 	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4883 
4884 	if (changed)
4885 		new_settings(hdev, match.sk);
4886 
4887 	if (match.sk)
4888 		sock_put(match.sk);
4889 
4890 	hci_req_init(&req, hdev);
4891 
4892 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4893 		update_eir(&req);
4894 	else
4895 		clear_eir(&req);
4896 
4897 	hci_req_run(&req, NULL);
4898 }
4899 
4900 static void sk_lookup(struct pending_cmd *cmd, void *data)
4901 {
4902 	struct cmd_lookup *match = data;
4903 
4904 	if (match->sk == NULL) {
4905 		match->sk = cmd->sk;
4906 		sock_hold(match->sk);
4907 	}
4908 }
4909 
4910 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
4911 				    u8 status)
4912 {
4913 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
4914 
4915 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
4916 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
4917 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
4918 
4919 	if (!status)
4920 		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
4921 			   NULL);
4922 
4923 	if (match.sk)
4924 		sock_put(match.sk);
4925 }
4926 
4927 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
4928 {
4929 	struct mgmt_cp_set_local_name ev;
4930 	struct pending_cmd *cmd;
4931 
4932 	if (status)
4933 		return;
4934 
4935 	memset(&ev, 0, sizeof(ev));
4936 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
4937 	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
4938 
4939 	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
4940 	if (!cmd) {
4941 		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
4942 
4943 		/* If this is an HCI command related to powering on the
4944 		 * HCI dev, don't send any mgmt signals.
4945 		 */
4946 		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4947 			return;
4948 	}
4949 
4950 	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
4951 		   cmd ? cmd->sk : NULL);
4952 }
4953 
4954 void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
4955 					     u8 *randomizer, u8 status)
4956 {
4957 	struct pending_cmd *cmd;
4958 
4959 	BT_DBG("%s status %u", hdev->name, status);
4960 
4961 	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4962 	if (!cmd)
4963 		return;
4964 
4965 	if (status) {
4966 		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4967 			   mgmt_status(status));
4968 	} else {
4969 		struct mgmt_rp_read_local_oob_data rp;
4970 
4971 		memcpy(rp.hash, hash, sizeof(rp.hash));
4972 		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
4973 
4974 		cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4975 			     0, &rp, sizeof(rp));
4976 	}
4977 
4978 	mgmt_pending_remove(cmd);
4979 }
4980 
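/* Report a discovery result with a Device Found event. Results are
 * only reported while discovery is active, and the class of device is
 * appended to the EIR data when not already present.
 */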
4981 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4982 		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
4983 		       u8 ssp, u8 *eir, u16 eir_len)
4984 {
4985 	char buf[512];
4986 	struct mgmt_ev_device_found *ev = (void *) buf;
4987 	size_t ev_size;
4988 
4989 	if (!hci_discovery_active(hdev))
4990 		return;
4991 
4992 	/* Leave 5 bytes for a potential CoD field */
4993 	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4994 		return;
4995 
4996 	memset(buf, 0, sizeof(buf));
4997 
4998 	bacpy(&ev->addr.bdaddr, bdaddr);
4999 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
5000 	ev->rssi = rssi;
5001 	if (cfm_name)
5002 		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
5003 	if (!ssp)
5004 		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
5005 
5006 	if (eir_len > 0)
5007 		memcpy(ev->eir, eir, eir_len);
5008 
5009 	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
5010 		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
5011 					  dev_class, 3);
5012 
5013 	ev->eir_len = cpu_to_le16(eir_len);
5014 	ev_size = sizeof(*ev) + eir_len;
5015 
5016 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
5017 }
5018 
5019 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5020 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5021 {
5022 	struct mgmt_ev_device_found *ev;
5023 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5024 	u16 eir_len;
5025 
5026 	ev = (struct mgmt_ev_device_found *) buf;
5027 
5028 	memset(buf, 0, sizeof(buf));
5029 
5030 	bacpy(&ev->addr.bdaddr, bdaddr);
5031 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
5032 	ev->rssi = rssi;
5033 
5034 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5035 				  name_len);
5036 
5037 	ev->eir_len = cpu_to_le16(eir_len);
5038 
5039 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5040 }
5041 
5042 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5043 {
5044 	struct mgmt_ev_discovering ev;
5045 	struct pending_cmd *cmd;
5046 
5047 	BT_DBG("%s discovering %u", hdev->name, discovering);
5048 
5049 	if (discovering)
5050 		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5051 	else
5052 		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5053 
5054 	if (cmd != NULL) {
5055 		u8 type = hdev->discovery.type;
5056 
5057 		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5058 			     sizeof(type));
5059 		mgmt_pending_remove(cmd);
5060 	}
5061 
5062 	memset(&ev, 0, sizeof(ev));
5063 	ev.type = hdev->discovery.type;
5064 	ev.discovering = discovering;
5065 
5066 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
5067 }
5068 
5069 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5070 {
5071 	struct pending_cmd *cmd;
5072 	struct mgmt_ev_device_blocked ev;
5073 
5074 	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5075 
5076 	bacpy(&ev.addr.bdaddr, bdaddr);
5077 	ev.addr.type = type;
5078 
5079 	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5080 			  cmd ? cmd->sk : NULL);
5081 }
5082 
5083 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5084 {
5085 	struct pending_cmd *cmd;
5086 	struct mgmt_ev_device_unblocked ev;
5087 
5088 	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5089 
5090 	bacpy(&ev.addr.bdaddr, bdaddr);
5091 	ev.addr.type = type;
5092 
5093 	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5094 			  cmd ? cmd->sk : NULL);
5095 }
5096 
5097 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5098 {
5099 	BT_DBG("%s status %u", hdev->name, status);
5100 
5101 	/* Clear the advertising mgmt setting if we failed to re-enable it */
5102 	if (status) {
5103 		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5104 		new_settings(hdev, NULL);
5105 	}
5106 }
5107 
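/* Re-enable advertising if the Advertising setting is still set and
 * there are no LE connections left. If the HCI request cannot be run,
 * the setting is cleared and user space is notified through New
 * Settings.
 */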
5108 void mgmt_reenable_advertising(struct hci_dev *hdev)
5109 {
5110 	struct hci_request req;
5111 
5112 	if (hci_conn_num(hdev, LE_LINK) > 0)
5113 		return;
5114 
5115 	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5116 		return;
5117 
5118 	hci_req_init(&req, hdev);
5119 	enable_advertising(&req);
5120 
5121 	/* If this fails we have no option but to let user space know
5122 	 * that we've disabled advertising.
5123 	 */
5124 	if (hci_req_run(&req, adv_enable_complete) < 0) {
5125 		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5126 		new_settings(hdev, NULL);
5127 	}
5128 }
5129