xref: /openbmc/linux/net/bluetooth/mgmt.c (revision d3ba5586)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34 
35 #include "smp.h"
36 
/* Version of the Management interface reported to user space through
 * MGMT_OP_READ_VERSION. MGMT_REVISION is bumped whenever commands,
 * events or settings are added to the interface.
 */
#define MGMT_VERSION	1
#define MGMT_REVISION	8
39 
/* Opcodes of all management commands this kernel supports; the list is
 * returned verbatim (as little-endian u16s) by read_commands(), so any
 * new command handler must also be added here.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
};
98 
/* Opcodes of all management events this kernel may emit; reported to
 * user space by read_commands() right after the command list.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
130 
131 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
132 
/* Book-keeping for a management command whose reply is deferred until
 * the corresponding HCI activity finishes. Entries are looked up via
 * mgmt_pending_find()/mgmt_pending_find_data() on hdev->mgmt_pending.
 */
struct pending_cmd {
	struct list_head list;		/* linkage on hdev->mgmt_pending */
	u16 opcode;			/* MGMT_OP_* this entry tracks */
	int index;			/* controller index it targets */
	void *param;			/* command parameter buffer */
	size_t param_len;		/* length of @param in bytes */
	struct sock *sk;		/* originating management socket */
	void *user_data;		/* handler-private context */
	/* invoked to send the final response with the given status */
	void (*cmd_complete)(struct pending_cmd *cmd, u8 status);
};
143 
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code (see mgmt_status()), so the
 * order of entries must match the HCI error code assignments exactly —
 * insert new codes only at the position of their numeric value.
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
208 
209 static u8 mgmt_status(u8 hci_status)
210 {
211 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
212 		return mgmt_status_table[hci_status];
213 
214 	return MGMT_STATUS_FAILED;
215 }
216 
/* Broadcast a management event to all control sockets, except for
 * @skip_sk when it is non-NULL. A NULL @hdev sends the event with
 * MGMT_INDEX_NONE (i.e. not tied to any controller). Returns 0 on
 * success or -ENOMEM if the skb cannot be allocated.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Wire format: mgmt_hdr immediately followed by the payload */
	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	/* hci_send_to_control() clones the skb per receiver, so drop
	 * our reference afterwards.
	 */
	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
246 
/* Queue a Command Status event for command @cmd with the given MGMT
 * @status directly on @sk's receive queue. Returns 0 on success or a
 * negative error (the skb is freed locally on queueing failure).
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
276 
/* Queue a Command Complete event for command @cmd on @sk's receive
 * queue, carrying an optional response payload of @rp_len bytes
 * (@rp may be NULL when @rp_len is 0). Returns 0 on success or a
 * negative error; the skb is freed locally on queueing failure.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	/* The response payload lives directly after the fixed event */
	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
310 
311 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
312 			u16 data_len)
313 {
314 	struct mgmt_rp_read_version rp;
315 
316 	BT_DBG("sock %p", sk);
317 
318 	rp.version = MGMT_VERSION;
319 	rp.revision = cpu_to_le16(MGMT_REVISION);
320 
321 	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
322 			    sizeof(rp));
323 }
324 
325 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
326 			 u16 data_len)
327 {
328 	struct mgmt_rp_read_commands *rp;
329 	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
330 	const u16 num_events = ARRAY_SIZE(mgmt_events);
331 	__le16 *opcode;
332 	size_t rp_size;
333 	int i, err;
334 
335 	BT_DBG("sock %p", sk);
336 
337 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
338 
339 	rp = kmalloc(rp_size, GFP_KERNEL);
340 	if (!rp)
341 		return -ENOMEM;
342 
343 	rp->num_commands = cpu_to_le16(num_commands);
344 	rp->num_events = cpu_to_le16(num_events);
345 
346 	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
347 		put_unaligned_le16(mgmt_commands[i], opcode);
348 
349 	for (i = 0; i < num_events; i++, opcode++)
350 		put_unaligned_le16(mgmt_events[i], opcode);
351 
352 	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
353 			   rp_size);
354 	kfree(rp);
355 
356 	return err;
357 }
358 
/* Handler for MGMT_OP_READ_INDEX_LIST: return the indexes of all
 * configured BR/EDR controllers. Uses a two-pass scheme under
 * hci_dev_list_lock: first pass computes an upper bound for the
 * allocation, second pass fills in the indexes that pass the
 * additional filters, so the final count may be smaller.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Upper-bound count: configured BR/EDR controllers only */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	/* 2 bytes per index; GFP_ATOMIC because the rwlock read side
	 * is held and sleeping is not allowed.
	 */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still being set up / configured or
		 * claimed exclusively by a user channel.
		 */
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* Recompute the response length from the indexes actually added */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
418 
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: same two-pass scheme as
 * read_index_list(), but selecting BR/EDR controllers that still have
 * the HCI_UNCONFIGURED flag set.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Upper-bound count for the allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	/* GFP_ATOMIC: allocated while holding the rwlock read side */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers in setup/config or in user-channel mode */
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* Trim the reported length to the entries actually added */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
478 
479 static bool is_configured(struct hci_dev *hdev)
480 {
481 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
483 		return false;
484 
485 	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
487 		return false;
488 
489 	return true;
490 }
491 
492 static __le32 get_missing_options(struct hci_dev *hdev)
493 {
494 	u32 options = 0;
495 
496 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
499 
500 	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
502 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
503 
504 	return cpu_to_le32(options);
505 }
506 
507 static int new_options(struct hci_dev *hdev, struct sock *skip)
508 {
509 	__le32 options = get_missing_options(hdev);
510 
511 	return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 			  sizeof(options), skip);
513 }
514 
515 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
516 {
517 	__le32 options = get_missing_options(hdev);
518 
519 	return cmd_complete(sk, hdev->id, opcode, 0, &options,
520 			    sizeof(options));
521 }
522 
/* Handler for MGMT_OP_READ_CONFIG_INFO: report the manufacturer plus
 * which configuration options this controller supports and which of
 * them are still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A settable public address is only supported when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
			    sizeof(rp));
}
550 
/* Build the bitmask of MGMT settings this controller could support,
 * based on its LMP features, HCI version and quirks (independent of
 * whether each setting is currently enabled — see
 * get_current_settings() for that).
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of controller capabilities */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable (interlaced page scan) needs 1.2+ */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		/* HCI_FORCE_SC is a debugfs override for testing */
		if (lmp_sc_capable(hdev) ||
		    test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
590 
591 static u32 get_current_settings(struct hci_dev *hdev)
592 {
593 	u32 settings = 0;
594 
595 	if (hdev_is_powered(hdev))
596 		settings |= MGMT_SETTING_POWERED;
597 
598 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
599 		settings |= MGMT_SETTING_CONNECTABLE;
600 
601 	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
602 		settings |= MGMT_SETTING_FAST_CONNECTABLE;
603 
604 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
605 		settings |= MGMT_SETTING_DISCOVERABLE;
606 
607 	if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
608 		settings |= MGMT_SETTING_BONDABLE;
609 
610 	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
611 		settings |= MGMT_SETTING_BREDR;
612 
613 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
614 		settings |= MGMT_SETTING_LE;
615 
616 	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
617 		settings |= MGMT_SETTING_LINK_SECURITY;
618 
619 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
620 		settings |= MGMT_SETTING_SSP;
621 
622 	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
623 		settings |= MGMT_SETTING_HS;
624 
625 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
626 		settings |= MGMT_SETTING_ADVERTISING;
627 
628 	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
629 		settings |= MGMT_SETTING_SECURE_CONN;
630 
631 	if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
632 		settings |= MGMT_SETTING_DEBUG_KEYS;
633 
634 	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
635 		settings |= MGMT_SETTING_PRIVACY;
636 
637 	return settings;
638 }
639 
#define PNP_INFO_SVCLASS_ID		0x1200

/* Append a 16-bit Service Class UUID list to the EIR buffer at @data
 * (at most @len bytes) and return a pointer just past what was written.
 * UUIDs are stored internally as 128-bit values; the 16-bit portion
 * sits at bytes 12-13. Reserved UUIDs (< 0x1100) and the PnP Info
 * class are skipped; if not everything fits the field type is demoted
 * from "complete" to "some".
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for length + type + at least one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		/* uuid->size is the UUID width in bits */
		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		/* Keep the EIR length byte in sync with the data added */
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
683 
/* Append a 32-bit Service Class UUID list to the EIR buffer at @data
 * (at most @len bytes); same layout conventions as create_uuid16_list().
 * The 32-bit portion of the stored 128-bit UUID occupies bytes 12-15.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for length + type + at least one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
716 
/* Append a 128-bit Service Class UUID list to the EIR buffer at @data
 * (at most @len bytes); same layout conventions as the 16/32-bit
 * variants, with each UUID copied verbatim as 16 bytes.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for length + type + at least one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
749 
750 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
751 {
752 	struct pending_cmd *cmd;
753 
754 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
755 		if (cmd->opcode == opcode)
756 			return cmd;
757 	}
758 
759 	return NULL;
760 }
761 
762 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
763 						  struct hci_dev *hdev,
764 						  const void *data)
765 {
766 	struct pending_cmd *cmd;
767 
768 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
769 		if (cmd->user_data != data)
770 			continue;
771 		if (cmd->opcode == opcode)
772 			return cmd;
773 	}
774 
775 	return NULL;
776 }
777 
778 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
779 {
780 	u8 ad_len = 0;
781 	size_t name_len;
782 
783 	name_len = strlen(hdev->dev_name);
784 	if (name_len > 0) {
785 		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
786 
787 		if (name_len > max_len) {
788 			name_len = max_len;
789 			ptr[1] = EIR_NAME_SHORT;
790 		} else
791 			ptr[1] = EIR_NAME_COMPLETE;
792 
793 		ptr[0] = name_len + 1;
794 
795 		memcpy(ptr + 2, hdev->dev_name, name_len);
796 
797 		ad_len += (name_len + 2);
798 		ptr += (name_len + 2);
799 	}
800 
801 	return ad_len;
802 }
803 
/* Queue an HCI Set Scan Response Data command on @req if LE is enabled
 * and the freshly built payload differs from what the controller
 * already has; otherwise do nothing. The cached copy in hdev is
 * updated before the command is queued.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
828 
829 static u8 get_adv_discov_flags(struct hci_dev *hdev)
830 {
831 	struct pending_cmd *cmd;
832 
833 	/* If there's a pending mgmt command the flags will not yet have
834 	 * their final values, so check for this first.
835 	 */
836 	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
837 	if (cmd) {
838 		struct mgmt_mode *cp = cmd->param;
839 		if (cp->val == 0x01)
840 			return LE_AD_GENERAL;
841 		else if (cp->val == 0x02)
842 			return LE_AD_LIMITED;
843 	} else {
844 		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
845 			return LE_AD_LIMITED;
846 		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
847 			return LE_AD_GENERAL;
848 	}
849 
850 	return 0;
851 }
852 
/* Build the LE advertising payload into @ptr (Flags field plus TX
 * power when known) and return the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	/* Advertise BR/EDR-not-supported when BR/EDR is disabled */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* A Flags field is only emitted when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
884 
/* Queue an HCI Set Advertising Data command on @req if LE is enabled
 * and the freshly built payload differs from the controller's current
 * one; mirrors update_scan_rsp_data().
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
909 
910 int mgmt_update_adv_data(struct hci_dev *hdev)
911 {
912 	struct hci_request req;
913 
914 	hci_req_init(&req, hdev);
915 	update_adv_data(&req);
916 
917 	return hci_req_run(&req, NULL);
918 }
919 
/* Build the Extended Inquiry Response payload into @data: local name,
 * inquiry TX power, Device ID record (when a devid source is set) and
 * the 16/32/128-bit service UUID lists, each trimmed to the remaining
 * space in HCI_MAX_EIR_LENGTH.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		/* 48-byte cap on the name; presumably chosen to leave
		 * room for the remaining EIR fields — confirm.
		 */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		/* Device ID field: 9 bytes of data after the length byte */
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
967 
/* Queue an HCI Write EIR command on @req when the controller is
 * powered, supports extended inquiry, has SSP enabled and the service
 * cache is not active — and only if the freshly built EIR actually
 * differs from the cached copy.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
996 
997 static u8 get_service_classes(struct hci_dev *hdev)
998 {
999 	struct bt_uuid *uuid;
1000 	u8 val = 0;
1001 
1002 	list_for_each_entry(uuid, &hdev->uuids, list)
1003 		val |= uuid->svc_hint;
1004 
1005 	return val;
1006 }
1007 
/* Queue an HCI Write Class of Device command on @req if the class
 * derived from the current state (minor/major class, service-class
 * hints, limited-discoverable bit) differs from what the controller
 * has. Skipped while powered off, BR/EDR-disabled or while the
 * service cache is active.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited Discoverable is signalled via bit 13 of the CoD */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
1036 
1037 static bool get_connectable(struct hci_dev *hdev)
1038 {
1039 	struct pending_cmd *cmd;
1040 
1041 	/* If there's a pending mgmt command the flag will not yet have
1042 	 * it's final value, so check for this first.
1043 	 */
1044 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1045 	if (cmd) {
1046 		struct mgmt_mode *cp = cmd->param;
1047 		return cp->val;
1048 	}
1049 
1050 	return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1051 }
1052 
1053 static void disable_advertising(struct hci_request *req)
1054 {
1055 	u8 enable = 0x00;
1056 
1057 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1058 }
1059 
/* Queue the HCI command sequence that (re)starts LE advertising on
 * @req: disable current advertising if active, refresh the random
 * address if needed, set the advertising parameters and finally
 * enable advertising. Bails out when an LE connection exists or when
 * a suitable own address cannot be determined. The exact command
 * order here matters.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Don't advertise while an LE connection is up */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1100 
/* Delayed-work handler for hdev->service_cache: once the cache window
 * expires, clear the flag and push the (possibly deferred) EIR and
 * Class of Device updates to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* test_and_clear: only the invocation that actually clears the
	 * flag performs the update.
	 */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1121 
/* Delayed-work handler for hdev->rpa_expired: mark the resolvable
 * private address as stale and, if advertising is enabled, restart
 * advertising so a fresh RPA gets generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1142 
/* One-time initialization performed when a controller is first touched
 * through the mgmt interface (first command from @sk). Subsequent calls
 * are no-ops thanks to the HCI_MGMT flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* Only run once per controller */
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_BONDABLE, &hdev->dev_flags);
}
1158 
/* Handle the MGMT Read Controller Information command: reply with the
 * controller address, HCI version, manufacturer, supported/current
 * settings, class of device and the local names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	/* Class of device is a fixed 3-byte field */
	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
1188 
1189 static void mgmt_pending_free(struct pending_cmd *cmd)
1190 {
1191 	sock_put(cmd->sk);
1192 	kfree(cmd->param);
1193 	kfree(cmd);
1194 }
1195 
1196 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1197 					    struct hci_dev *hdev, void *data,
1198 					    u16 len)
1199 {
1200 	struct pending_cmd *cmd;
1201 
1202 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1203 	if (!cmd)
1204 		return NULL;
1205 
1206 	cmd->opcode = opcode;
1207 	cmd->index = hdev->id;
1208 
1209 	cmd->param = kmemdup(data, len, GFP_KERNEL);
1210 	if (!cmd->param) {
1211 		kfree(cmd);
1212 		return NULL;
1213 	}
1214 
1215 	cmd->param_len = len;
1216 
1217 	cmd->sk = sk;
1218 	sock_hold(sk);
1219 
1220 	list_add(&cmd->list, &hdev->mgmt_pending);
1221 
1222 	return cmd;
1223 }
1224 
1225 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1226 				 void (*cb)(struct pending_cmd *cmd,
1227 					    void *data),
1228 				 void *data)
1229 {
1230 	struct pending_cmd *cmd, *tmp;
1231 
1232 	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1233 		if (opcode > 0 && cmd->opcode != opcode)
1234 			continue;
1235 
1236 		cb(cmd, data);
1237 	}
1238 }
1239 
/* Unlink a pending command from its controller's list and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1245 
/* Reply to a mgmt command with a Command Complete carrying the current
 * settings bitmask (little-endian) as its parameter.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}
1253 
/* Completion callback for the clean_up_hci_state() request: once all
 * connections are gone, schedule the power-off work immediately instead
 * of waiting for the power-off timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		/* Replace the delayed power-off with an immediate one */
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1263 
/* Queue the HCI commands needed to stop whatever discovery activity is
 * currently running (inquiry, LE scan, remote name resolving, or plain
 * passive scanning). Returns true if any stop command was queued.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Either a BR/EDR inquiry or an LE scan is active */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Cancel an outstanding remote name request, if any */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1305 
/* Prepare the controller for power-off: disable page/inquiry scan and
 * advertising, stop discovery, and disconnect, cancel or reject every
 * connection depending on its state. Returns the result of running the
 * assembled HCI request (-ENODATA if nothing needed to be done).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	/* Turn off page and/or inquiry scanning if enabled */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	/* Tear down every connection according to its current state */
	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established link: disconnect it */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection attempt: cancel it */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection request: reject it */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1364 
1365 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1366 		       u16 len)
1367 {
1368 	struct mgmt_mode *cp = data;
1369 	struct pending_cmd *cmd;
1370 	int err;
1371 
1372 	BT_DBG("request for %s", hdev->name);
1373 
1374 	if (cp->val != 0x00 && cp->val != 0x01)
1375 		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1376 				  MGMT_STATUS_INVALID_PARAMS);
1377 
1378 	hci_dev_lock(hdev);
1379 
1380 	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1381 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1382 				 MGMT_STATUS_BUSY);
1383 		goto failed;
1384 	}
1385 
1386 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1387 		cancel_delayed_work(&hdev->power_off);
1388 
1389 		if (cp->val) {
1390 			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1391 					 data, len);
1392 			err = mgmt_powered(hdev, 1);
1393 			goto failed;
1394 		}
1395 	}
1396 
1397 	if (!!cp->val == hdev_is_powered(hdev)) {
1398 		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1399 		goto failed;
1400 	}
1401 
1402 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1403 	if (!cmd) {
1404 		err = -ENOMEM;
1405 		goto failed;
1406 	}
1407 
1408 	if (cp->val) {
1409 		queue_work(hdev->req_workqueue, &hdev->power_on);
1410 		err = 0;
1411 	} else {
1412 		/* Disconnect connections, stop scans, etc */
1413 		err = clean_up_hci_state(hdev);
1414 		if (!err)
1415 			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1416 					   HCI_POWER_OFF_TIMEOUT);
1417 
1418 		/* ENODATA means there were no HCI commands queued */
1419 		if (err == -ENODATA) {
1420 			cancel_delayed_work(&hdev->power_off);
1421 			queue_work(hdev->req_workqueue, &hdev->power_off.work);
1422 			err = 0;
1423 		}
1424 	}
1425 
1426 failed:
1427 	hci_dev_unlock(hdev);
1428 	return err;
1429 }
1430 
1431 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1432 {
1433 	__le32 ev;
1434 
1435 	ev = cpu_to_le32(get_current_settings(hdev));
1436 
1437 	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1438 }
1439 
/* Public wrapper: broadcast a New Settings event to all mgmt sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1444 
/* Context passed through mgmt_pending_foreach() callbacks such as
 * settings_rsp(): carries the controller, collects the first responded
 * socket (with a reference held), and an optional mgmt status.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1450 
/* mgmt_pending_foreach() callback: answer a pending settings command
 * with the current settings and retire it. Remembers (and holds a
 * reference to) the first responder's socket in the cmd_lookup so the
 * caller can later skip it when broadcasting New Settings.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	/* Keep only the first socket; the extra reference is dropped by
	 * the caller via sock_put().
	 */
	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1466 
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status pointed to by @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1474 
1475 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1476 {
1477 	if (cmd->cmd_complete) {
1478 		u8 *status = data;
1479 
1480 		cmd->cmd_complete(cmd, *status);
1481 		mgmt_pending_remove(cmd);
1482 
1483 		return;
1484 	}
1485 
1486 	cmd_status_rsp(cmd, data);
1487 }
1488 
/* Generic cmd_complete handler: echo the full original command
 * parameters back in the Command Complete response.
 */
static void generic_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
		     cmd->param_len);
}
1494 
/* cmd_complete handler for address-based commands: respond with only
 * the leading mgmt_addr_info portion of the original parameters.
 */
static void addr_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
		     sizeof(struct mgmt_addr_info));
}
1500 
1501 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1502 {
1503 	if (!lmp_bredr_capable(hdev))
1504 		return MGMT_STATUS_NOT_SUPPORTED;
1505 	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1506 		return MGMT_STATUS_REJECTED;
1507 	else
1508 		return MGMT_STATUS_SUCCESS;
1509 }
1510 
1511 static u8 mgmt_le_support(struct hci_dev *hdev)
1512 {
1513 	if (!lmp_le_capable(hdev))
1514 		return MGMT_STATUS_NOT_SUPPORTED;
1515 	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1516 		return MGMT_STATUS_REJECTED;
1517 	else
1518 		return MGMT_STATUS_SUCCESS;
1519 }
1520 
/* HCI request completion callback for Set Discoverable: update the
 * HCI_DISCOVERABLE flag, arm the discoverable timeout, answer the
 * pending command and broadcast any settings change.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		/* The HCI request failed: report the error and undo the
		 * limited-discoverable flag set in set_discoverable().
		 */
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timer that turns discoverable mode back off */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	hci_update_page_scan(hdev, &req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1579 
/* Handle the MGMT Set Discoverable command. Valid values are 0x00
 * (off), 0x01 (general discoverable) and 0x02 (limited discoverable,
 * which additionally requires a timeout). Builds and runs the HCI
 * request; final flag updates happen in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Requires at least one of LE or BR/EDR to be enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1744 
/* Queue page scan activity/type commands that switch between fast
 * connectable (interlaced scan, short interval) and the default page
 * scan parameters. Commands are only queued when a value actually
 * changes. Requires BR/EDR enabled and HCI version >= 1.2.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Interlaced page scan was introduced with Bluetooth 1.2 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	/* 11.25 msec page scan window in both modes */
	acp.window = cpu_to_le16(0x0012);

	/* Only touch the controller when the values differ */
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1779 
/* HCI request completion callback for Set Connectable: update the
 * connectable (and, when disabling, discoverable) flags, answer the
 * pending command and propagate any settings change.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		/* Disabling connectable also disables discoverable */
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev, NULL);
		/* Dropping discoverable changes the advertising data */
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1828 
1829 static int set_connectable_update_settings(struct hci_dev *hdev,
1830 					   struct sock *sk, u8 val)
1831 {
1832 	bool changed = false;
1833 	int err;
1834 
1835 	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1836 		changed = true;
1837 
1838 	if (val) {
1839 		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1840 	} else {
1841 		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1842 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1843 	}
1844 
1845 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1846 	if (err < 0)
1847 		return err;
1848 
1849 	if (changed) {
1850 		hci_update_page_scan(hdev, NULL);
1851 		hci_update_background_scan(hdev);
1852 		return new_settings(hdev, sk);
1853 	}
1854 
1855 	return 0;
1856 }
1857 
/* Handle the MGMT Set Connectable command: enable or disable page
 * scanning (BR/EDR) and/or connectable advertising (LE). When powered
 * off only the stored flags are updated via
 * set_connectable_update_settings().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Requires at least one of LE or BR/EDR to be enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only flags need updating, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* Nothing was queued; fall back to a flag-only update */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1962 
1963 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1964 			u16 len)
1965 {
1966 	struct mgmt_mode *cp = data;
1967 	bool changed;
1968 	int err;
1969 
1970 	BT_DBG("request for %s", hdev->name);
1971 
1972 	if (cp->val != 0x00 && cp->val != 0x01)
1973 		return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1974 				  MGMT_STATUS_INVALID_PARAMS);
1975 
1976 	hci_dev_lock(hdev);
1977 
1978 	if (cp->val)
1979 		changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1980 	else
1981 		changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1982 
1983 	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1984 	if (err < 0)
1985 		goto unlock;
1986 
1987 	if (changed)
1988 		err = new_settings(hdev, sk);
1989 
1990 unlock:
1991 	hci_dev_unlock(hdev);
1992 	return err;
1993 }
1994 
/* Handle the MGMT Set Link Security command: enable or disable BR/EDR
 * authentication via the Write Authentication Enable HCI command. When
 * powered off only the HCI_LINK_SECURITY flag is updated.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Powered off: just flip the stored flag */
		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested state: no HCI needed */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2064 
/* Handle the MGMT Set Secure Simple Pairing command: toggle SSP support
 * via the Write Simple Pairing Mode HCI command. Disabling SSP also
 * clears High Speed support (HS depends on SSP). When powered off only
 * the flags are updated.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Disabling SSP also disables HS; report a change
			 * if either flag was set.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: no HCI traffic needed */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off SSP debug mode, best effort */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2146 
/* Handle the MGMT Set High Speed command: toggle the HCI_HS_ENABLED
 * flag (host-side only; no HCI traffic). Requires SSP to be enabled;
 * disabling HS while powered on is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS requires SSP to be enabled first */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* HS can only be disabled while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2197 
/* HCI request completion callback for Set LE: answer all pending SET_LE
 * commands (with an error status on failure), broadcast New Settings,
 * and refresh advertising data and background scanning when LE ended up
 * enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending SET_LE command with the HCI error */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	/* Skip the socket that already got a direct response */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
2238 
/* Handler for MGMT_OP_SET_LE: enable or disable LE support on the host.
 *
 * When the controller is powered this is translated into a Write LE
 * Host Supported HCI command; otherwise only the HCI_LE_ENABLED flag is
 * updated and the settings response is sent directly.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean values are accepted */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* When powered off, or when the controller already matches the
	 * requested state, only the flags need updating - no HCI traffic.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE implicitly disables advertising */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one LE-related state change may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before LE support is switched off */
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	/* le_enable_complete() answers the pending command when the
	 * request finishes.
	 */
	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2327 
2328 /* This is a helper function to test for pending mgmt commands that can
2329  * cause CoD or EIR HCI commands. We can only allow one such pending
2330  * mgmt command at a time since otherwise we cannot easily track what
2331  * the current values are, will be, and based on that calculate if a new
2332  * HCI command needs to be sent and if yes with what value.
2333  */
2334 static bool pending_eir_or_class(struct hci_dev *hdev)
2335 {
2336 	struct pending_cmd *cmd;
2337 
2338 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2339 		switch (cmd->opcode) {
2340 		case MGMT_OP_ADD_UUID:
2341 		case MGMT_OP_REMOVE_UUID:
2342 		case MGMT_OP_SET_DEV_CLASS:
2343 		case MGMT_OP_SET_POWERED:
2344 			return true;
2345 		}
2346 	}
2347 
2348 	return false;
2349 }
2350 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; the short 16/32-bit UUID value lives in
 * bytes 12-15.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2355 
2356 static u8 get_uuid_size(const u8 *uuid)
2357 {
2358 	u32 val;
2359 
2360 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2361 		return 128;
2362 
2363 	val = get_unaligned_le32(&uuid[12]);
2364 	if (val > 0xffff)
2365 		return 32;
2366 
2367 	return 16;
2368 }
2369 
2370 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2371 {
2372 	struct pending_cmd *cmd;
2373 
2374 	hci_dev_lock(hdev);
2375 
2376 	cmd = mgmt_pending_find(mgmt_op, hdev);
2377 	if (!cmd)
2378 		goto unlock;
2379 
2380 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2381 		     hdev->dev_class, 3);
2382 
2383 	mgmt_pending_remove(cmd);
2384 
2385 unlock:
2386 	hci_dev_unlock(hdev);
2387 }
2388 
/* HCI request callback for MGMT_OP_ADD_UUID */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2395 
/* Handler for MGMT_OP_ADD_UUID: register a service UUID and update the
 * Class of Device and EIR data accordingly.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		/* -ENODATA indicates no HCI commands were queued (nothing
		 * needed updating), so the command completes immediately.
		 */
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	/* Response is deferred until add_uuid_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2453 
2454 static bool enable_service_cache(struct hci_dev *hdev)
2455 {
2456 	if (!hdev_is_powered(hdev))
2457 		return false;
2458 
2459 	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2460 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2461 				   CACHE_TIMEOUT);
2462 		return true;
2463 	}
2464 
2465 	return false;
2466 }
2467 
/* HCI request callback for MGMT_OP_REMOVE_UUID */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2474 
/* Handler for MGMT_OP_REMOVE_UUID: remove a single service UUID - or
 * all of them when the all-zeroes wildcard UUID is given - and refresh
 * the Class of Device and EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The all-zeroes UUID acts as a wildcard and clears everything */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was armed the CoD/EIR
		 * update is deferred to it, so respond right away.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA indicates no HCI commands were queued, so the
		 * command completes immediately.
		 */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Response is deferred until remove_uuid_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2552 
/* HCI request callback for MGMT_OP_SET_DEV_CLASS */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2559 
/* Handler for MGMT_OP_SET_DEV_CLASS: set the major/minor device class
 * and push the resulting Class of Device (and possibly EIR) to the
 * controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two minor bits and high three major bits are not part
	 * of the settable class value and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off the new class takes effect at power on */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* The lock is dropped around the synchronous cancel -
		 * NOTE(review): presumably because the service cache work
		 * takes the hdev lock itself; confirm against the work
		 * function before changing this.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA indicates no HCI commands were queued, so the
		 * command completes immediately.
		 */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Response is deferred until set_class_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2630 
/* Handler for MGMT_OP_LOAD_LINK_KEYS: replace all stored BR/EDR link
 * keys with the supplied list and update the keep-debug-keys policy.
 * All entries are validated before any existing state is touched.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the advertised key count exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* debug_keys is a boolean flag */
	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before modifying the key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2712 
2713 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2714 			   u8 addr_type, struct sock *skip_sk)
2715 {
2716 	struct mgmt_ev_device_unpaired ev;
2717 
2718 	bacpy(&ev.addr.bdaddr, bdaddr);
2719 	ev.addr.type = addr_type;
2720 
2721 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2722 			  skip_sk);
2723 }
2724 
/* Handler for MGMT_OP_UNPAIR_DEVICE: forget all stored keys for a
 * device and, if requested, terminate an existing connection to it.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	/* disconnect is a boolean flag */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* Remove both the identity resolving key and the LTKs */
		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* A negative result means no key was stored for this device */
	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Response is deferred until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2837 
/* Handler for MGMT_OP_DISCONNECT: terminate an existing BR/EDR or LE
 * connection. The reply is generated from the disconnect complete
 * event via the pending command.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only a single disconnect may be in progress at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2900 
2901 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2902 {
2903 	switch (link_type) {
2904 	case LE_LINK:
2905 		switch (addr_type) {
2906 		case ADDR_LE_DEV_PUBLIC:
2907 			return BDADDR_LE_PUBLIC;
2908 
2909 		default:
2910 			/* Fallback to LE Random address type */
2911 			return BDADDR_LE_RANDOM;
2912 		}
2913 
2914 	default:
2915 		/* Fallback to BR/EDR type */
2916 		return BDADDR_BREDR;
2917 	}
2918 }
2919 
2920 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2921 			   u16 data_len)
2922 {
2923 	struct mgmt_rp_get_connections *rp;
2924 	struct hci_conn *c;
2925 	size_t rp_len;
2926 	int err;
2927 	u16 i;
2928 
2929 	BT_DBG("");
2930 
2931 	hci_dev_lock(hdev);
2932 
2933 	if (!hdev_is_powered(hdev)) {
2934 		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2935 				 MGMT_STATUS_NOT_POWERED);
2936 		goto unlock;
2937 	}
2938 
2939 	i = 0;
2940 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2941 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2942 			i++;
2943 	}
2944 
2945 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2946 	rp = kmalloc(rp_len, GFP_KERNEL);
2947 	if (!rp) {
2948 		err = -ENOMEM;
2949 		goto unlock;
2950 	}
2951 
2952 	i = 0;
2953 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2954 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2955 			continue;
2956 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2957 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2958 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2959 			continue;
2960 		i++;
2961 	}
2962 
2963 	rp->conn_count = cpu_to_le16(i);
2964 
2965 	/* Recalculate length in case of filtered SCO connections, etc */
2966 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2967 
2968 	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2969 			   rp_len);
2970 
2971 	kfree(rp);
2972 
2973 unlock:
2974 	hci_dev_unlock(hdev);
2975 	return err;
2976 }
2977 
/* Queue a pending PIN Code Neg Reply command and forward it to the
 * controller. The mgmt response is generated from the HCI complete
 * event.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2996 
/* Handler for MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code
 * to the controller. When high security demands a 16 digit PIN and the
 * supplied one is shorter, a negative reply is sent instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16 byte PIN; reject anything shorter
	 * by sending a negative reply to the controller instead.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3058 
3059 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3060 			     u16 len)
3061 {
3062 	struct mgmt_cp_set_io_capability *cp = data;
3063 
3064 	BT_DBG("");
3065 
3066 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3067 		return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3068 				    MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3069 
3070 	hci_dev_lock(hdev);
3071 
3072 	hdev->io_capability = cp->io_capability;
3073 
3074 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3075 	       hdev->io_capability);
3076 
3077 	hci_dev_unlock(hdev);
3078 
3079 	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
3080 			    0);
3081 }
3082 
3083 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3084 {
3085 	struct hci_dev *hdev = conn->hdev;
3086 	struct pending_cmd *cmd;
3087 
3088 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3089 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3090 			continue;
3091 
3092 		if (cmd->user_data != conn)
3093 			continue;
3094 
3095 		return cmd;
3096 	}
3097 
3098 	return NULL;
3099 }
3100 
/* Finish a Pair Device command: report the result to userspace, detach
 * the pairing callbacks and release the connection references.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Matches the hci_conn_get() taken when pairing started */
	hci_conn_put(conn);
}
3126 
3127 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3128 {
3129 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3130 	struct pending_cmd *cmd;
3131 
3132 	cmd = find_pairing(conn);
3133 	if (cmd) {
3134 		cmd->cmd_complete(cmd, status);
3135 		mgmt_pending_remove(cmd);
3136 	}
3137 }
3138 
3139 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3140 {
3141 	struct pending_cmd *cmd;
3142 
3143 	BT_DBG("status %u", status);
3144 
3145 	cmd = find_pairing(conn);
3146 	if (!cmd) {
3147 		BT_DBG("Unable to find a pending command");
3148 		return;
3149 	}
3150 
3151 	cmd->cmd_complete(cmd, mgmt_status(status));
3152 	mgmt_pending_remove(cmd);
3153 }
3154 
/* Connection callback used for LE pairing. For LE a successful event
 * alone does not imply that pairing finished - SMP reports that via
 * mgmt_smp_complete() - so only failures are handled here.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	/* Success is reported separately through mgmt_smp_complete() */
	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3173 
/* Handler for MGMT_OP_PAIR_DEVICE: initiate pairing with a remote
 * device, establishing the BR/EDR or LE connection first if necessary.
 * The final result is delivered through the pairing callbacks.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A non-NULL callback means another pairing already owns this
	 * connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released again in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure enough, pairing can be
	 * completed right away.
	 */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3296 
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: abort an in-progress Pair
 * Device command for the given address.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the pairing currently in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Fail the pending Pair Device command with Cancelled status */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3339 
/* Common helper for all user pairing responses (PIN, confirm, passkey
 * and their negative variants). LE responses are handed to SMP and
 * completed immediately, while BR/EDR responses are forwarded to the
 * controller as the given HCI command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE responses go through SMP and complete right away */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3409 
3410 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3411 			      void *data, u16 len)
3412 {
3413 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3414 
3415 	BT_DBG("");
3416 
3417 	return user_pairing_resp(sk, hdev, &cp->addr,
3418 				MGMT_OP_PIN_CODE_NEG_REPLY,
3419 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3420 }
3421 
3422 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3423 			      u16 len)
3424 {
3425 	struct mgmt_cp_user_confirm_reply *cp = data;
3426 
3427 	BT_DBG("");
3428 
3429 	if (len != sizeof(*cp))
3430 		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3431 				  MGMT_STATUS_INVALID_PARAMS);
3432 
3433 	return user_pairing_resp(sk, hdev, &cp->addr,
3434 				 MGMT_OP_USER_CONFIRM_REPLY,
3435 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3436 }
3437 
3438 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3439 				  void *data, u16 len)
3440 {
3441 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3442 
3443 	BT_DBG("");
3444 
3445 	return user_pairing_resp(sk, hdev, &cp->addr,
3446 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3447 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3448 }
3449 
3450 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3451 			      u16 len)
3452 {
3453 	struct mgmt_cp_user_passkey_reply *cp = data;
3454 
3455 	BT_DBG("");
3456 
3457 	return user_pairing_resp(sk, hdev, &cp->addr,
3458 				 MGMT_OP_USER_PASSKEY_REPLY,
3459 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3460 }
3461 
3462 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3463 				  void *data, u16 len)
3464 {
3465 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3466 
3467 	BT_DBG("");
3468 
3469 	return user_pairing_resp(sk, hdev, &cp->addr,
3470 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3471 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3472 }
3473 
3474 static void update_name(struct hci_request *req)
3475 {
3476 	struct hci_dev *hdev = req->hdev;
3477 	struct hci_cp_write_local_name cp;
3478 
3479 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3480 
3481 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3482 }
3483 
/* HCI request completion handler for Set Local Name: answer the
 * pending mgmt command with the stored parameters (or an error) and
 * drop the pending entry.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* Nothing to do if the command was already answered or cancelled */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3511 
/* Handle the Set Local Name mgmt command. Updates both the complete
 * and the short name. When powered, the name change goes through an
 * HCI request (EIR and scan response data carry the name) and the
 * reply is deferred to set_name_complete(); when powered off the name
 * is stored locally and answered immediately.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	/* The short name is never sent to the controller, so it can be
	 * updated unconditionally.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3580 
/* Handle the Read Local OOB Data mgmt command. Requires a powered,
 * SSP-capable controller; the actual data is fetched with an HCI
 * command and the reply is sent from its completion handler via the
 * pending command added here.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one Read Local OOB Data request may be in flight */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* With Secure Connections the extended variant also returns the
	 * P-256 hash and randomizer.
	 */
	if (bredr_sc_enabled(hdev))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3628 
3629 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3630 			       void *data, u16 len)
3631 {
3632 	int err;
3633 
3634 	BT_DBG("%s ", hdev->name);
3635 
3636 	hci_dev_lock(hdev);
3637 
3638 	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3639 		struct mgmt_cp_add_remote_oob_data *cp = data;
3640 		u8 status;
3641 
3642 		if (cp->addr.type != BDADDR_BREDR) {
3643 			err = cmd_complete(sk, hdev->id,
3644 					   MGMT_OP_ADD_REMOTE_OOB_DATA,
3645 					   MGMT_STATUS_INVALID_PARAMS,
3646 					   &cp->addr, sizeof(cp->addr));
3647 			goto unlock;
3648 		}
3649 
3650 		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3651 					      cp->addr.type, cp->hash,
3652 					      cp->rand, NULL, NULL);
3653 		if (err < 0)
3654 			status = MGMT_STATUS_FAILED;
3655 		else
3656 			status = MGMT_STATUS_SUCCESS;
3657 
3658 		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3659 				   status, &cp->addr, sizeof(cp->addr));
3660 	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3661 		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3662 		u8 *rand192, *hash192;
3663 		u8 status;
3664 
3665 		if (cp->addr.type != BDADDR_BREDR) {
3666 			err = cmd_complete(sk, hdev->id,
3667 					   MGMT_OP_ADD_REMOTE_OOB_DATA,
3668 					   MGMT_STATUS_INVALID_PARAMS,
3669 					   &cp->addr, sizeof(cp->addr));
3670 			goto unlock;
3671 		}
3672 
3673 		if (bdaddr_type_is_le(cp->addr.type)) {
3674 			rand192 = NULL;
3675 			hash192 = NULL;
3676 		} else {
3677 			rand192 = cp->rand192;
3678 			hash192 = cp->hash192;
3679 		}
3680 
3681 		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3682 					      cp->addr.type, hash192, rand192,
3683 					      cp->hash256, cp->rand256);
3684 		if (err < 0)
3685 			status = MGMT_STATUS_FAILED;
3686 		else
3687 			status = MGMT_STATUS_SUCCESS;
3688 
3689 		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3690 				   status, &cp->addr, sizeof(cp->addr));
3691 	} else {
3692 		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3693 		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3694 				 MGMT_STATUS_INVALID_PARAMS);
3695 	}
3696 
3697 unlock:
3698 	hci_dev_unlock(hdev);
3699 	return err;
3700 }
3701 
/* Handle the Remove Remote OOB Data mgmt command. BDADDR_ANY clears
 * all stored remote OOB data; any other address removes a single
 * entry. Only BR/EDR addresses are accepted.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	/* A failure here means no matching entry existed */
	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	/* The outcome is reported via status; err only carries the
	 * result of sending the reply itself.
	 */
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
			   status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
3737 
/* Queue the HCI commands needed to start discovery of the type set in
 * hdev->discovery.type onto @req.
 *
 * Returns true on success. On failure returns false and stores a
 * MGMT_STATUS_* error code in *status; no commands are guaranteed to
 * have been queued in that case.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 own_addr_type;
	int err;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		*status = mgmt_bredr_support(hdev);
		if (*status)
			return false;

		/* Only one inquiry can run at a time */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			*status = MGMT_STATUS_BUSY;
			return false;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		*status = mgmt_le_support(hdev);
		if (*status)
			return false;

		/* Interleaved discovery also needs BR/EDR enabled */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				*status = MGMT_STATUS_REJECTED;
				return false;
			}

			disable_advertising(req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or non-resolvable private address.
		 */
		err = hci_update_random_address(req, true, &own_addr_type);
		if (err < 0) {
			*status = MGMT_STATUS_FAILED;
			return false;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
3834 
/* HCI request completion handler shared by Start Discovery and Start
 * Service Discovery: answer the pending command, update the discovery
 * state and, for LE-based discovery, arm the scan timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* The pending command may be either of the two discovery ops */
	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* BR/EDR inquiry ends on its own; LE scanning must be stopped
	 * by the delayed le_scan_disable work after the timeout.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout)
		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}
3883 
/* Handle the Start Discovery mgmt command: validate the current state,
 * reset any service-discovery filter, queue the discovery HCI commands
 * and defer the reply to start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Discovery can only be started from the stopped state and not
	 * while periodic inquiry is active.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3949 
/* Completion callback for Start Service Discovery. Only the first byte
 * of the stored command parameters (cp->type, the discovery type) is
 * echoed back in the response, hence the hard-coded length of 1.
 */
static void service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, 1);
}
3954 
/* Handle the Start Service Discovery mgmt command. Like Start
 * Discovery but additionally installs an RSSI threshold and an
 * optional UUID filter (16 bytes per UUID appended to the command)
 * used when reporting found devices.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	/* The command length must exactly match the declared UUID count */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_SERVICE_DISCOVERY,
					   MGMT_STATUS_FAILED,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4062 
/* HCI request completion handler for Stop Discovery: answer the
 * pending command and, on success, mark discovery as stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (!status)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
}
4082 
/* Handle the Stop Discovery mgmt command. The requested type must
 * match the active discovery. The reply is normally deferred to
 * stop_discovery_complete(); if stopping required no HCI commands
 * (-ENODATA) the command is completed immediately.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4140 
4141 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4142 			u16 len)
4143 {
4144 	struct mgmt_cp_confirm_name *cp = data;
4145 	struct inquiry_entry *e;
4146 	int err;
4147 
4148 	BT_DBG("%s", hdev->name);
4149 
4150 	hci_dev_lock(hdev);
4151 
4152 	if (!hci_discovery_active(hdev)) {
4153 		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4154 				   MGMT_STATUS_FAILED, &cp->addr,
4155 				   sizeof(cp->addr));
4156 		goto failed;
4157 	}
4158 
4159 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4160 	if (!e) {
4161 		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4162 				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4163 				   sizeof(cp->addr));
4164 		goto failed;
4165 	}
4166 
4167 	if (cp->name_known) {
4168 		e->name_state = NAME_KNOWN;
4169 		list_del(&e->list);
4170 	} else {
4171 		e->name_state = NAME_NEEDED;
4172 		hci_inquiry_cache_update_resolve(hdev, e);
4173 	}
4174 
4175 	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4176 			   sizeof(cp->addr));
4177 
4178 failed:
4179 	hci_dev_unlock(hdev);
4180 	return err;
4181 }
4182 
4183 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4184 			u16 len)
4185 {
4186 	struct mgmt_cp_block_device *cp = data;
4187 	u8 status;
4188 	int err;
4189 
4190 	BT_DBG("%s", hdev->name);
4191 
4192 	if (!bdaddr_type_is_valid(cp->addr.type))
4193 		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4194 				    MGMT_STATUS_INVALID_PARAMS,
4195 				    &cp->addr, sizeof(cp->addr));
4196 
4197 	hci_dev_lock(hdev);
4198 
4199 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4200 				  cp->addr.type);
4201 	if (err < 0) {
4202 		status = MGMT_STATUS_FAILED;
4203 		goto done;
4204 	}
4205 
4206 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4207 		   sk);
4208 	status = MGMT_STATUS_SUCCESS;
4209 
4210 done:
4211 	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4212 			   &cp->addr, sizeof(cp->addr));
4213 
4214 	hci_dev_unlock(hdev);
4215 
4216 	return err;
4217 }
4218 
4219 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4220 			  u16 len)
4221 {
4222 	struct mgmt_cp_unblock_device *cp = data;
4223 	u8 status;
4224 	int err;
4225 
4226 	BT_DBG("%s", hdev->name);
4227 
4228 	if (!bdaddr_type_is_valid(cp->addr.type))
4229 		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4230 				    MGMT_STATUS_INVALID_PARAMS,
4231 				    &cp->addr, sizeof(cp->addr));
4232 
4233 	hci_dev_lock(hdev);
4234 
4235 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4236 				  cp->addr.type);
4237 	if (err < 0) {
4238 		status = MGMT_STATUS_INVALID_PARAMS;
4239 		goto done;
4240 	}
4241 
4242 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4243 		   sk);
4244 	status = MGMT_STATUS_SUCCESS;
4245 
4246 done:
4247 	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4248 			   &cp->addr, sizeof(cp->addr));
4249 
4250 	hci_dev_unlock(hdev);
4251 
4252 	return err;
4253 }
4254 
/* Handle the Set Device ID mgmt command: store the DI profile values
 * (source, vendor, product, version) and refresh the EIR data which
 * carries the device ID record.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	/* Valid sources: 0 = disabled, 1 = Bluetooth SIG, 2 = USB IF */
	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

	/* Fire-and-forget EIR update; no completion handling needed */
	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
4288 
/* HCI request completion handler for Set Advertising: sync the
 * HCI_ADVERTISING setting with the controller state and answer all
 * pending Set Advertising commands.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the actual controller advertising state into the
	 * mgmt-visible setting flag.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

unlock:
	hci_dev_unlock(hdev);
}
4319 
/* Handle the Set Advertising mgmt command: toggle LE advertising.
 * Depending on current state this either just flips the setting flag
 * (answered immediately) or issues HCI commands, with the reply
 * deferred to set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Don't race with an advertising or LE state change in flight */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4399 
4400 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4401 			      void *data, u16 len)
4402 {
4403 	struct mgmt_cp_set_static_address *cp = data;
4404 	int err;
4405 
4406 	BT_DBG("%s", hdev->name);
4407 
4408 	if (!lmp_le_capable(hdev))
4409 		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4410 				  MGMT_STATUS_NOT_SUPPORTED);
4411 
4412 	if (hdev_is_powered(hdev))
4413 		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4414 				  MGMT_STATUS_REJECTED);
4415 
4416 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4417 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4418 			return cmd_status(sk, hdev->id,
4419 					  MGMT_OP_SET_STATIC_ADDRESS,
4420 					  MGMT_STATUS_INVALID_PARAMS);
4421 
4422 		/* Two most significant bits shall be set */
4423 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4424 			return cmd_status(sk, hdev->id,
4425 					  MGMT_OP_SET_STATIC_ADDRESS,
4426 					  MGMT_STATUS_INVALID_PARAMS);
4427 	}
4428 
4429 	hci_dev_lock(hdev);
4430 
4431 	bacpy(&hdev->static_addr, &cp->bdaddr);
4432 
4433 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4434 
4435 	hci_dev_unlock(hdev);
4436 
4437 	return err;
4438 }
4439 
4440 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4441 			   void *data, u16 len)
4442 {
4443 	struct mgmt_cp_set_scan_params *cp = data;
4444 	__u16 interval, window;
4445 	int err;
4446 
4447 	BT_DBG("%s", hdev->name);
4448 
4449 	if (!lmp_le_capable(hdev))
4450 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4451 				  MGMT_STATUS_NOT_SUPPORTED);
4452 
4453 	interval = __le16_to_cpu(cp->interval);
4454 
4455 	if (interval < 0x0004 || interval > 0x4000)
4456 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4457 				  MGMT_STATUS_INVALID_PARAMS);
4458 
4459 	window = __le16_to_cpu(cp->window);
4460 
4461 	if (window < 0x0004 || window > 0x4000)
4462 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4463 				  MGMT_STATUS_INVALID_PARAMS);
4464 
4465 	if (window > interval)
4466 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4467 				  MGMT_STATUS_INVALID_PARAMS);
4468 
4469 	hci_dev_lock(hdev);
4470 
4471 	hdev->le_scan_interval = interval;
4472 	hdev->le_scan_window = window;
4473 
4474 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4475 
4476 	/* If background scan is running, restart it so new parameters are
4477 	 * loaded.
4478 	 */
4479 	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4480 	    hdev->discovery.state == DISCOVERY_STOPPED) {
4481 		struct hci_request req;
4482 
4483 		hci_req_init(&req, hdev);
4484 
4485 		hci_req_add_le_scan_disable(&req);
4486 		hci_req_add_le_passive_scan(&req);
4487 
4488 		hci_req_run(&req, NULL);
4489 	}
4490 
4491 	hci_dev_unlock(hdev);
4492 
4493 	return err;
4494 }
4495 
/* HCI request completion handler for Set Fast Connectable: update the
 * setting flag on success and answer the pending mgmt command.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4528 
/* Set Fast Connectable mgmt command handler.
 *
 * Toggles the controller's fast connectable mode through
 * write_fast_connectable(); the operation completes asynchronously in
 * fast_connectable_complete(), which also updates the
 * HCI_FAST_CONNECTABLE flag.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* Requires BR/EDR and a controller of at least version 1.2. */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* The mode parameter is a boolean. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	/* Fast connectable is rejected unless the adapter is connectable. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Only one instance of this command may be pending at a time. */
	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Requested mode already active: reply without touching the
	 * controller.
	 */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4593 
4594 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4595 {
4596 	struct pending_cmd *cmd;
4597 
4598 	BT_DBG("status 0x%02x", status);
4599 
4600 	hci_dev_lock(hdev);
4601 
4602 	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4603 	if (!cmd)
4604 		goto unlock;
4605 
4606 	if (status) {
4607 		u8 mgmt_err = mgmt_status(status);
4608 
4609 		/* We need to restore the flag if related HCI commands
4610 		 * failed.
4611 		 */
4612 		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4613 
4614 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4615 	} else {
4616 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4617 		new_settings(hdev, cmd->sk);
4618 	}
4619 
4620 	mgmt_pending_remove(cmd);
4621 
4622 unlock:
4623 	hci_dev_unlock(hdev);
4624 }
4625 
/* Set BR/EDR mgmt command handler.
 *
 * Enables or disables BR/EDR support on a dual-mode (BR/EDR + LE)
 * controller. While powered off the flag is toggled directly; while
 * powered on only enabling is accepted and the change is completed
 * asynchronously in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* This setting only exists on dual-mode controllers. */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Toggling BR/EDR is only accepted while LE is enabled. */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Nothing to do if the requested state is already set. */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* When disabling BR/EDR, also clear all settings that
		 * only make sense with BR/EDR present.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags. On failure set_bredr_complete()
	 * reverts it.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	hci_update_page_scan(hdev, &req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4715 
/* Set Secure Connections mgmt command handler.
 *
 * cp->val: 0x00 = disabled, 0x01 = enabled, 0x02 = SC Only mode.
 * When the controller is powered off, not SC capable (and SC is not
 * forced via the debugfs flag), or BR/EDR is disabled, the flags are
 * updated in place without any HCI traffic. Otherwise the Write
 * Secure Connections Host Support command is sent to the controller.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !lmp_sc_capable(hdev) && !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only path: no HCI command can (or needs to) be sent. */
	if (!hdev_is_powered(hdev) ||
	    (!lmp_sc_capable(hdev) &&
	     !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) ||
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No change in either the enabled or the SC Only state. */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* Record the SC Only preference right away; HCI_SC_ENABLED is
	 * presumably updated by the command complete handler (not shown
	 * here) once the controller confirms.
	 */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4801 
/* Set Debug Keys mgmt command handler.
 *
 * cp->val: 0x00 = discard debug keys, 0x01 = keep debug keys,
 * 0x02 = keep debug keys and additionally enable SSP debug mode.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Both 0x01 and 0x02 mean debug keys are kept. */
	if (cp->val)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Only 0x02 additionally enables use of debug keys. */
	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
						&hdev->dev_flags);
	else
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
						 &hdev->dev_flags);

	/* Inform a powered, SSP-enabled controller of the new debug
	 * mode; the command result is intentionally not waited for.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4849 
/* Set Privacy mgmt command handler.
 *
 * Enables or disables LE privacy and stores the Identity Resolving
 * Key supplied by user space. Only permitted while powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	/* Privacy may only be changed while the controller is off. */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark any current RPA as expired. */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4899 
4900 static bool irk_is_valid(struct mgmt_irk_info *irk)
4901 {
4902 	switch (irk->addr.type) {
4903 	case BDADDR_LE_PUBLIC:
4904 		return true;
4905 
4906 	case BDADDR_LE_RANDOM:
4907 		/* Two most significant bits shall be set */
4908 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4909 			return false;
4910 		return true;
4911 	}
4912 
4913 	return false;
4914 }
4915 
/* Load IRKs mgmt command handler.
 *
 * Replaces the complete Identity Resolving Key list with the one
 * supplied by user space. All entries are validated before the
 * existing list is cleared, so an invalid entry leaves the current
 * list untouched.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound keeps the expected_len computation within u16. */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared entry count must match the actual payload size. */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Reject the whole load if any single entry is invalid. */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_IRKS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* User space handing us IRKs implies it can resolve RPAs. */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4982 
4983 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4984 {
4985 	if (key->master != 0x00 && key->master != 0x01)
4986 		return false;
4987 
4988 	switch (key->addr.type) {
4989 	case BDADDR_LE_PUBLIC:
4990 		return true;
4991 
4992 	case BDADDR_LE_RANDOM:
4993 		/* Two most significant bits shall be set */
4994 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4995 			return false;
4996 		return true;
4997 	}
4998 
4999 	return false;
5000 }
5001 
/* Load Long Term Keys mgmt command handler.
 *
 * Replaces the complete LTK list with the one supplied by user space.
 * All entries are validated before the existing list is cleared.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound keeps the expected_len computation within u16. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared entry count must match the actual payload size. */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Reject the whole load if any single entry is invalid. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* NOTE(review): there is no break here, so this
			 * case falls through to default and the key is
			 * skipped (the two assignments above are dead).
			 * Not loading debug keys looks intentional, but
			 * confirm and annotate with a fallthrough marker.
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5093 
5094 static void conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5095 {
5096 	struct hci_conn *conn = cmd->user_data;
5097 	struct mgmt_rp_get_conn_info rp;
5098 
5099 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5100 
5101 	if (status == MGMT_STATUS_SUCCESS) {
5102 		rp.rssi = conn->rssi;
5103 		rp.tx_power = conn->tx_power;
5104 		rp.max_tx_power = conn->max_tx_power;
5105 	} else {
5106 		rp.rssi = HCI_RSSI_INVALID;
5107 		rp.tx_power = HCI_TX_POWER_INVALID;
5108 		rp.max_tx_power = HCI_TX_POWER_INVALID;
5109 	}
5110 
5111 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
5112 		     &rp, sizeof(rp));
5113 
5114 	hci_conn_drop(conn);
5115 	hci_conn_put(conn);
5116 }
5117 
/* Request callback for the Read RSSI / Read TX Power request built in
 * get_conn_info(). Recovers the connection handle from the last sent
 * command and completes the matching pending Get Conn Info command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status)
{
	struct hci_cp_read_rssi *cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command found: nothing to match the response against. */
	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5169 
/* Get Conn Info mgmt command handler.
 *
 * Returns RSSI and TX power information for an active connection.
 * Values are served from the cache in struct hci_conn when recent
 * enough; otherwise a refresh request is sent to the controller and
 * the reply is delivered from conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Every response, including errors, echoes back the address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* Look the connection up on the appropriate transport. */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* Only one request per connection may be in flight. */
	if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		/* Read RSSI must come first; conn_info_refresh_complete()
		 * relies on that ordering.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the command completes;
		 * released in conn_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5288 
5289 static void clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5290 {
5291 	struct hci_conn *conn = cmd->user_data;
5292 	struct mgmt_rp_get_clock_info rp;
5293 	struct hci_dev *hdev;
5294 
5295 	memset(&rp, 0, sizeof(rp));
5296 	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5297 
5298 	if (status)
5299 		goto complete;
5300 
5301 	hdev = hci_dev_get(cmd->index);
5302 	if (hdev) {
5303 		rp.local_clock = cpu_to_le32(hdev->clock);
5304 		hci_dev_put(hdev);
5305 	}
5306 
5307 	if (conn) {
5308 		rp.piconet_clock = cpu_to_le32(conn->clock);
5309 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5310 	}
5311 
5312 complete:
5313 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, sizeof(rp));
5314 
5315 	if (conn) {
5316 		hci_conn_drop(conn);
5317 		hci_conn_put(conn);
5318 	}
5319 }
5320 
5321 static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
5322 {
5323 	struct hci_cp_read_clock *hci_cp;
5324 	struct pending_cmd *cmd;
5325 	struct hci_conn *conn;
5326 
5327 	BT_DBG("%s status %u", hdev->name, status);
5328 
5329 	hci_dev_lock(hdev);
5330 
5331 	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5332 	if (!hci_cp)
5333 		goto unlock;
5334 
5335 	if (hci_cp->which) {
5336 		u16 handle = __le16_to_cpu(hci_cp->handle);
5337 		conn = hci_conn_hash_lookup_handle(hdev, handle);
5338 	} else {
5339 		conn = NULL;
5340 	}
5341 
5342 	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5343 	if (!cmd)
5344 		goto unlock;
5345 
5346 	cmd->cmd_complete(cmd, mgmt_status(status));
5347 	mgmt_pending_remove(cmd);
5348 
5349 unlock:
5350 	hci_dev_unlock(hdev);
5351 }
5352 
/* Get Clock Info mgmt command handler.
 *
 * Reads the local clock and, when a peer address is given, also the
 * piconet clock of the BR/EDR connection to that peer. The reply is
 * sent from clock_info_cmd_complete() via get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Every response, including errors, echoes back the address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is a BR/EDR-only concept here. */
	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY requests only the local clock; any other address
	 * must refer to an existing connection.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First command: read the local clock (which = 0x00). */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Keep the connection alive until the command completes;
		 * released in clock_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5427 
5428 static void device_added(struct sock *sk, struct hci_dev *hdev,
5429 			 bdaddr_t *bdaddr, u8 type, u8 action)
5430 {
5431 	struct mgmt_ev_device_added ev;
5432 
5433 	bacpy(&ev.addr.bdaddr, bdaddr);
5434 	ev.addr.type = type;
5435 	ev.action = action;
5436 
5437 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5438 }
5439 
/* Add Device mgmt command handler.
 *
 * cp->action: 0x00 = background scan only, 0x01 = allow incoming
 * connection, 0x02 = auto-connect. BR/EDR addresses are added to the
 * whitelist (only action 0x01 is supported there); LE addresses get
 * connection parameters with a matching auto-connect policy.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	/* The address must be valid and must not be the any-address. */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		hci_update_page_scan(hdev, NULL);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* Map the mgmt action onto the LE auto-connect policy. */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				   MGMT_STATUS_FAILED,
				   &cp->addr, sizeof(cp->addr));
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5514 
5515 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5516 			   bdaddr_t *bdaddr, u8 type)
5517 {
5518 	struct mgmt_ev_device_removed ev;
5519 
5520 	bacpy(&ev.addr.bdaddr, bdaddr);
5521 	ev.addr.type = type;
5522 
5523 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5524 }
5525 
/* Remove Device mgmt command handler.
 *
 * Removes a single device (BR/EDR whitelist entry or LE connection
 * parameters), or, when BDADDR_ANY with type 0x00 is given, removes
 * all whitelist entries and all non-disabled LE connection
 * parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_REMOVE_DEVICE,
						   MGMT_STATUS_INVALID_PARAMS,
						   &cp->addr, sizeof(cp->addr));
				goto unlock;
			}

			hci_update_page_scan(hdev, NULL);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled entries were not created by Add Device, so
		 * they cannot be removed through this command either.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* The wildcard address requires address type 0x00. */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_page_scan(hdev, NULL);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			/* Disabled entries are kept; see above. */
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5634 
5635 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5636 			   u16 len)
5637 {
5638 	struct mgmt_cp_load_conn_param *cp = data;
5639 	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5640 				     sizeof(struct mgmt_conn_param));
5641 	u16 param_count, expected_len;
5642 	int i;
5643 
5644 	if (!lmp_le_capable(hdev))
5645 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5646 				  MGMT_STATUS_NOT_SUPPORTED);
5647 
5648 	param_count = __le16_to_cpu(cp->param_count);
5649 	if (param_count > max_param_count) {
5650 		BT_ERR("load_conn_param: too big param_count value %u",
5651 		       param_count);
5652 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5653 				  MGMT_STATUS_INVALID_PARAMS);
5654 	}
5655 
5656 	expected_len = sizeof(*cp) + param_count *
5657 					sizeof(struct mgmt_conn_param);
5658 	if (expected_len != len) {
5659 		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5660 		       expected_len, len);
5661 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5662 				  MGMT_STATUS_INVALID_PARAMS);
5663 	}
5664 
5665 	BT_DBG("%s param_count %u", hdev->name, param_count);
5666 
5667 	hci_dev_lock(hdev);
5668 
5669 	hci_conn_params_clear_disabled(hdev);
5670 
5671 	for (i = 0; i < param_count; i++) {
5672 		struct mgmt_conn_param *param = &cp->params[i];
5673 		struct hci_conn_params *hci_param;
5674 		u16 min, max, latency, timeout;
5675 		u8 addr_type;
5676 
5677 		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5678 		       param->addr.type);
5679 
5680 		if (param->addr.type == BDADDR_LE_PUBLIC) {
5681 			addr_type = ADDR_LE_DEV_PUBLIC;
5682 		} else if (param->addr.type == BDADDR_LE_RANDOM) {
5683 			addr_type = ADDR_LE_DEV_RANDOM;
5684 		} else {
5685 			BT_ERR("Ignoring invalid connection parameters");
5686 			continue;
5687 		}
5688 
5689 		min = le16_to_cpu(param->min_interval);
5690 		max = le16_to_cpu(param->max_interval);
5691 		latency = le16_to_cpu(param->latency);
5692 		timeout = le16_to_cpu(param->timeout);
5693 
5694 		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5695 		       min, max, latency, timeout);
5696 
5697 		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5698 			BT_ERR("Ignoring invalid connection parameters");
5699 			continue;
5700 		}
5701 
5702 		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5703 						addr_type);
5704 		if (!hci_param) {
5705 			BT_ERR("Failed to add connection parameters");
5706 			continue;
5707 		}
5708 
5709 		hci_param->conn_min_interval = min;
5710 		hci_param->conn_max_interval = max;
5711 		hci_param->conn_latency = latency;
5712 		hci_param->supervision_timeout = timeout;
5713 	}
5714 
5715 	hci_dev_unlock(hdev);
5716 
5717 	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
5718 }
5719 
/* Handle MGMT_OP_SET_EXTERNAL_CONFIG: toggle whether the controller's
 * configuration is provided by an external entity. Only valid on
 * controllers carrying HCI_QUIRK_EXTERNAL_CONFIG, and only while
 * powered off.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				    MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* The atomic test-and-modify tells us whether the flag actually
	 * flipped; if not, there is nothing further to announce.
	 */
	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the HCI_UNCONFIGURED flag now disagrees with the actual
	 * configuration state (flag set but controller is configured, or
	 * flag clear but required config is missing), re-announce the
	 * index under its new identity.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			/* Was unconfigured, now configured: schedule a
			 * power-on cycle so the regular index appears.
			 */
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Was configured, now unconfigured again. */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5777 
5778 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5779 			      void *data, u16 len)
5780 {
5781 	struct mgmt_cp_set_public_address *cp = data;
5782 	bool changed;
5783 	int err;
5784 
5785 	BT_DBG("%s", hdev->name);
5786 
5787 	if (hdev_is_powered(hdev))
5788 		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5789 				  MGMT_STATUS_REJECTED);
5790 
5791 	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5792 		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5793 				  MGMT_STATUS_INVALID_PARAMS);
5794 
5795 	if (!hdev->set_bdaddr)
5796 		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5797 				  MGMT_STATUS_NOT_SUPPORTED);
5798 
5799 	hci_dev_lock(hdev);
5800 
5801 	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5802 	bacpy(&hdev->public_addr, &cp->bdaddr);
5803 
5804 	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5805 	if (err < 0)
5806 		goto unlock;
5807 
5808 	if (!changed)
5809 		goto unlock;
5810 
5811 	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5812 		err = new_options(hdev, sk);
5813 
5814 	if (is_configured(hdev)) {
5815 		mgmt_index_removed(hdev);
5816 
5817 		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5818 
5819 		set_bit(HCI_CONFIG, &hdev->dev_flags);
5820 		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5821 
5822 		queue_work(hdev->req_workqueue, &hdev->power_on);
5823 	}
5824 
5825 unlock:
5826 	hci_dev_unlock(hdev);
5827 	return err;
5828 }
5829 
/* Dispatch table for incoming mgmt commands, indexed by opcode.
 * Entry 0 is a placeholder since opcode 0x0000 is not a valid command.
 * The order of entries must match the MGMT_OP_* opcode numbering.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;		/* true: data_len is a minimum, not exact */
	size_t data_len;	/* expected (or minimum) parameter length */
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_bondable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
	{ start_service_discovery,true,  MGMT_START_SERVICE_DISCOVERY_SIZE },
};
5896 
/* Entry point for mgmt commands received on an HCI control socket.
 * Copies the message, validates the header, resolves the target
 * controller index, applies per-opcode permission and length checks,
 * and dispatches to the matching mgmt_handlers entry.
 *
 * Returns the number of consumed bytes on success or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The length field must match the actual payload size. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config, or bound to a user
		 * channel, are not addressable through mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers accept only the configuration
		 * related commands.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global (index-less) commands must not carry a controller... */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ...and per-controller commands require one. */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Fixed-size commands need an exact length; variable-size ones
	 * only a minimum.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
6006 
6007 void mgmt_index_added(struct hci_dev *hdev)
6008 {
6009 	if (hdev->dev_type != HCI_BREDR)
6010 		return;
6011 
6012 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6013 		return;
6014 
6015 	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6016 		mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6017 	else
6018 		mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
6019 }
6020 
6021 void mgmt_index_removed(struct hci_dev *hdev)
6022 {
6023 	u8 status = MGMT_STATUS_INVALID_INDEX;
6024 
6025 	if (hdev->dev_type != HCI_BREDR)
6026 		return;
6027 
6028 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6029 		return;
6030 
6031 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6032 
6033 	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6034 		mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6035 	else
6036 		mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6037 }
6038 
6039 /* This function requires the caller holds hdev->lock */
6040 static void restart_le_actions(struct hci_dev *hdev)
6041 {
6042 	struct hci_conn_params *p;
6043 
6044 	list_for_each_entry(p, &hdev->le_conn_params, list) {
6045 		/* Needed for AUTO_OFF case where might not "really"
6046 		 * have been powered off.
6047 		 */
6048 		list_del_init(&p->action);
6049 
6050 		switch (p->auto_connect) {
6051 		case HCI_AUTO_CONN_DIRECT:
6052 		case HCI_AUTO_CONN_ALWAYS:
6053 			list_add(&p->action, &hdev->pend_le_conns);
6054 			break;
6055 		case HCI_AUTO_CONN_REPORT:
6056 			list_add(&p->action, &hdev->pend_le_reports);
6057 			break;
6058 		default:
6059 			break;
6060 		}
6061 	}
6062 
6063 	hci_update_background_scan(hdev);
6064 }
6065 
/* Completion callback for the power-on HCI request built by
 * powered_update_hci(). Restores LE auto-connect actions, answers any
 * pending Set Powered commands and broadcasts the new settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_actions(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* Release the socket reference stashed in match (if any). */
	if (match.sk)
		sock_put(match.sk);
}
6085 
/* Build and run the HCI request that brings the controller state in
 * line with the mgmt settings after power-on: SSP, secure connections,
 * LE host support, advertising data, link security, page scan, class,
 * name and EIR. Returns the result of hci_req_run(); powered_complete()
 * is invoked when the request finishes.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP on the controller if mgmt wants it but the host
	 * feature bit is not yet set.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
		u8 sc = 0x01;
		hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, sizeof(sc), &sc);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the authentication-enable setting with the mgmt flag. */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		write_fast_connectable(&req, false);
		hci_update_page_scan(hdev, &req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
6150 
/* Notify the mgmt layer of a controller power state change. On power-on
 * this kicks off powered_update_hci(); on power-off it fails all pending
 * commands and clears the advertised class of device. In both cases a
 * New Settings event is emitted.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* A return of 0 means the request was queued and
		 * powered_complete() will do the rest asynchronously.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Advertise an all-zero class of device if one was set. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
6197 
6198 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6199 {
6200 	struct pending_cmd *cmd;
6201 	u8 status;
6202 
6203 	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6204 	if (!cmd)
6205 		return;
6206 
6207 	if (err == -ERFKILL)
6208 		status = MGMT_STATUS_RFKILLED;
6209 	else
6210 		status = MGMT_STATUS_FAILED;
6211 
6212 	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6213 
6214 	mgmt_pending_remove(cmd);
6215 }
6216 
/* Handle expiry of the discoverable timeout: clear the discoverable
 * flags, restore page-scan-only mode on BR/EDR, refresh class and
 * advertising data, and broadcast the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Drop inquiry scan, keep page scan. */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
6247 
/* Emit a New Link Key event for a BR/EDR link key. The store_hint
 * tells userspace whether the key should be persisted.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	/* Clear the whole event so no stale stack bytes are sent to
	 * userspace.
	 */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
6264 
6265 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6266 {
6267 	switch (ltk->type) {
6268 	case SMP_LTK:
6269 	case SMP_LTK_SLAVE:
6270 		if (ltk->authenticated)
6271 			return MGMT_LTK_AUTHENTICATED;
6272 		return MGMT_LTK_UNAUTHENTICATED;
6273 	case SMP_LTK_P256:
6274 		if (ltk->authenticated)
6275 			return MGMT_LTK_P256_AUTH;
6276 		return MGMT_LTK_P256_UNAUTH;
6277 	case SMP_LTK_P256_DEBUG:
6278 		return MGMT_LTK_P256_DEBUG;
6279 	}
6280 
6281 	return MGMT_LTK_UNAUTHENTICATED;
6282 }
6283 
/* Emit a New Long Term Key event for an LE LTK received during SMP
 * pairing. The store_hint tells userspace whether persisting the key
 * makes sense for this device's address type.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an indentity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK is the master-role key. */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
6321 
6322 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6323 {
6324 	struct mgmt_ev_new_irk ev;
6325 
6326 	memset(&ev, 0, sizeof(ev));
6327 
6328 	/* For identity resolving keys from devices that are already
6329 	 * using a public address or static random address, do not
6330 	 * ask for storing this key. The identity resolving key really
6331 	 * is only mandatory for devices using resovlable random
6332 	 * addresses.
6333 	 *
6334 	 * Storing all identity resolving keys has the downside that
6335 	 * they will be also loaded on next boot of they system. More
6336 	 * identity resolving keys, means more time during scanning is
6337 	 * needed to actually resolve these addresses.
6338 	 */
6339 	if (bacmp(&irk->rpa, BDADDR_ANY))
6340 		ev.store_hint = 0x01;
6341 	else
6342 		ev.store_hint = 0x00;
6343 
6344 	bacpy(&ev.rpa, &irk->rpa);
6345 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6346 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6347 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6348 
6349 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6350 }
6351 
6352 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6353 		   bool persistent)
6354 {
6355 	struct mgmt_ev_new_csrk ev;
6356 
6357 	memset(&ev, 0, sizeof(ev));
6358 
6359 	/* Devices using resolvable or non-resolvable random addresses
6360 	 * without providing an indentity resolving key don't require
6361 	 * to store signature resolving keys. Their addresses will change
6362 	 * the next time around.
6363 	 *
6364 	 * Only when a remote device provides an identity address
6365 	 * make sure the signature resolving key is stored. So allow
6366 	 * static random and public addresses here.
6367 	 */
6368 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6369 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6370 		ev.store_hint = 0x00;
6371 	else
6372 		ev.store_hint = persistent;
6373 
6374 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6375 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6376 	ev.key.master = csrk->master;
6377 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6378 
6379 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6380 }
6381 
/* Emit a New Connection Parameter event so userspace can decide whether
 * to store the parameters the remote device requested. Only identity
 * addresses are reported.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	/* Event fields are little-endian on the wire. */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
6402 
6403 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6404 				  u8 data_len)
6405 {
6406 	eir[eir_len++] = sizeof(type) + data_len;
6407 	eir[eir_len++] = type;
6408 	memcpy(&eir[eir_len], data, data_len);
6409 	eir_len += data_len;
6410 
6411 	return eir_len;
6412 }
6413 
/* Emit a Device Connected event, attaching EIR data: either the raw LE
 * advertising data from the connection, or (for BR/EDR) the remote name
 * and class of device.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* NOTE(review): fixed 512-byte buffer — assumes name_len and
	 * le_adv_data_len are bounded by callers so the event plus EIR
	 * always fits; confirm at the call sites.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class if it is non-zero. */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
6450 
6451 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6452 {
6453 	struct sock **sk = data;
6454 
6455 	cmd->cmd_complete(cmd, 0);
6456 
6457 	*sk = cmd->sk;
6458 	sock_hold(*sk);
6459 
6460 	mgmt_pending_remove(cmd);
6461 }
6462 
6463 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6464 {
6465 	struct hci_dev *hdev = data;
6466 	struct mgmt_cp_unpair_device *cp = cmd->param;
6467 
6468 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6469 
6470 	cmd->cmd_complete(cmd, 0);
6471 	mgmt_pending_remove(cmd);
6472 }
6473 
6474 bool mgmt_powering_down(struct hci_dev *hdev)
6475 {
6476 	struct pending_cmd *cmd;
6477 	struct mgmt_mode *cp;
6478 
6479 	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6480 	if (!cmd)
6481 		return false;
6482 
6483 	cp = cmd->param;
6484 	if (!cp->val)
6485 		return true;
6486 
6487 	return false;
6488 }
6489 
/* Emit a Device Disconnected event and complete any pending Disconnect
 * or Unpair Device commands that were waiting on this link. Also kicks
 * the deferred power-off when this was the last open connection during
 * a power-down.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* No event if mgmt never saw this connection as established. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the initiating socket (referenced) in
	 * sk so the event can be suppressed for that listener.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6525 
6526 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6527 			    u8 link_type, u8 addr_type, u8 status)
6528 {
6529 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6530 	struct mgmt_cp_disconnect *cp;
6531 	struct pending_cmd *cmd;
6532 
6533 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6534 			     hdev);
6535 
6536 	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6537 	if (!cmd)
6538 		return;
6539 
6540 	cp = cmd->param;
6541 
6542 	if (bacmp(bdaddr, &cp->addr.bdaddr))
6543 		return;
6544 
6545 	if (cp->addr.type != bdaddr_type)
6546 		return;
6547 
6548 	cmd->cmd_complete(cmd, mgmt_status(status));
6549 	mgmt_pending_remove(cmd);
6550 }
6551 
/* Emit a Connect Failed event for an outgoing connection attempt that
 * did not complete. Also kicks the deferred power-off when this was the
 * last connection during a power-down.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
6571 
6572 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6573 {
6574 	struct mgmt_ev_pin_code_request ev;
6575 
6576 	bacpy(&ev.addr.bdaddr, bdaddr);
6577 	ev.addr.type = BDADDR_BREDR;
6578 	ev.secure = secure;
6579 
6580 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6581 }
6582 
6583 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6584 				  u8 status)
6585 {
6586 	struct pending_cmd *cmd;
6587 
6588 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6589 	if (!cmd)
6590 		return;
6591 
6592 	cmd->cmd_complete(cmd, mgmt_status(status));
6593 	mgmt_pending_remove(cmd);
6594 }
6595 
6596 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6597 				      u8 status)
6598 {
6599 	struct pending_cmd *cmd;
6600 
6601 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6602 	if (!cmd)
6603 		return;
6604 
6605 	cmd->cmd_complete(cmd, mgmt_status(status));
6606 	mgmt_pending_remove(cmd);
6607 }
6608 
6609 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6610 			      u8 link_type, u8 addr_type, u32 value,
6611 			      u8 confirm_hint)
6612 {
6613 	struct mgmt_ev_user_confirm_request ev;
6614 
6615 	BT_DBG("%s", hdev->name);
6616 
6617 	bacpy(&ev.addr.bdaddr, bdaddr);
6618 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
6619 	ev.confirm_hint = confirm_hint;
6620 	ev.value = cpu_to_le32(value);
6621 
6622 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6623 			  NULL);
6624 }
6625 
6626 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6627 			      u8 link_type, u8 addr_type)
6628 {
6629 	struct mgmt_ev_user_passkey_request ev;
6630 
6631 	BT_DBG("%s", hdev->name);
6632 
6633 	bacpy(&ev.addr.bdaddr, bdaddr);
6634 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
6635 
6636 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6637 			  NULL);
6638 }
6639 
6640 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6641 				      u8 link_type, u8 addr_type, u8 status,
6642 				      u8 opcode)
6643 {
6644 	struct pending_cmd *cmd;
6645 
6646 	cmd = mgmt_pending_find(opcode, hdev);
6647 	if (!cmd)
6648 		return -ENOENT;
6649 
6650 	cmd->cmd_complete(cmd, mgmt_status(status));
6651 	mgmt_pending_remove(cmd);
6652 
6653 	return 0;
6654 }
6655 
/* Complete a pending MGMT_OP_USER_CONFIRM_REPLY command; returns
 * -ENOENT if none is pending for this controller.
 */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
6662 
/* Complete a pending MGMT_OP_USER_CONFIRM_NEG_REPLY command; returns
 * -ENOENT if none is pending for this controller.
 */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
6670 
/* Complete a pending MGMT_OP_USER_PASSKEY_REPLY command; returns
 * -ENOENT if none is pending for this controller.
 */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
6677 
/* Complete a pending MGMT_OP_USER_PASSKEY_NEG_REPLY command; returns
 * -ENOENT if none is pending for this controller.
 */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
6685 
6686 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6687 			     u8 link_type, u8 addr_type, u32 passkey,
6688 			     u8 entered)
6689 {
6690 	struct mgmt_ev_passkey_notify ev;
6691 
6692 	BT_DBG("%s", hdev->name);
6693 
6694 	bacpy(&ev.addr.bdaddr, bdaddr);
6695 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
6696 	ev.passkey = __cpu_to_le32(passkey);
6697 	ev.entered = entered;
6698 
6699 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6700 }
6701 
6702 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6703 {
6704 	struct mgmt_ev_auth_failed ev;
6705 	struct pending_cmd *cmd;
6706 	u8 status = mgmt_status(hci_status);
6707 
6708 	bacpy(&ev.addr.bdaddr, &conn->dst);
6709 	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6710 	ev.status = status;
6711 
6712 	cmd = find_pairing(conn);
6713 
6714 	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
6715 		    cmd ? cmd->sk : NULL);
6716 
6717 	if (cmd) {
6718 		cmd->cmd_complete(cmd, status);
6719 		mgmt_pending_remove(cmd);
6720 	}
6721 }
6722 
/* Handle completion of the HCI authentication-enable change issued for
 * MGMT_OP_SET_LINK_SECURITY.  On failure every pending
 * SET_LINK_SECURITY command receives a command status.  On success the
 * HCI_LINK_SECURITY dev flag is synced with the controller's HCI_AUTH
 * state and New Settings is emitted if the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* The test_and_* helpers report whether the flag actually
	 * flipped, which decides whether New Settings is sent below.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Drop the socket reference recorded in match, if any */
	if (match.sk)
		sock_put(match.sk);
}
6751 
6752 static void clear_eir(struct hci_request *req)
6753 {
6754 	struct hci_dev *hdev = req->hdev;
6755 	struct hci_cp_write_eir cp;
6756 
6757 	if (!lmp_ext_inq_capable(hdev))
6758 		return;
6759 
6760 	memset(hdev->eir, 0, sizeof(hdev->eir));
6761 
6762 	memset(&cp, 0, sizeof(cp));
6763 
6764 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6765 }
6766 
/* Handle completion of the HCI Write Simple Pairing Mode command issued
 * for MGMT_OP_SET_SSP.  Syncs the HCI_SSP_ENABLED (and dependent
 * HCI_HS_ENABLED) flags with the result, answers all pending SET_SSP
 * commands, and refreshes or clears the EIR data to match the new
 * state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* NOTE(review): on a failed enable the flag appears to
		 * have been set before the command was issued - undo it
		 * (and HS, which depends on SSP) and tell user space.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* Disabling SSP also disables High Speed; report a
		 * change if either flag was previously set.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	/* With SSP on, optionally enable debug keys and refresh the EIR
	 * data; with SSP off, the EIR data must be cleared.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6819 
/* Handle completion of the HCI Secure Connections support change issued
 * for MGMT_OP_SET_SECURE_CONN.  Syncs the HCI_SC_ENABLED and HCI_SC_ONLY
 * flags with the result, answers all pending SET_SECURE_CONN commands
 * and emits New Settings if the enabled state changed.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* NOTE(review): on a failed enable the flags appear to
		 * have been set before the command was issued - undo
		 * them and notify user space if SC was indeed set.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		/* SC-only mode cannot remain set once SC is disabled */
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6856 
6857 static void sk_lookup(struct pending_cmd *cmd, void *data)
6858 {
6859 	struct cmd_lookup *match = data;
6860 
6861 	if (match->sk == NULL) {
6862 		match->sk = cmd->sk;
6863 		sock_hold(match->sk);
6864 	}
6865 }
6866 
6867 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6868 				    u8 status)
6869 {
6870 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6871 
6872 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6873 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6874 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6875 
6876 	if (!status)
6877 		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
6878 			   NULL);
6879 
6880 	if (match.sk)
6881 		sock_put(match.sk);
6882 }
6883 
/* Handle completion of a local name change on the controller.
 *
 * NOTE(review): name is assumed to point at a buffer of at least
 * HCI_MAX_NAME_LENGTH bytes - confirm against callers.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* The change was not requested through mgmt, so cache
		 * the new name locally.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the requesting socket (if any) when broadcasting */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6910 
/* Handle completion of the HCI Read Local OOB (Extended) Data command
 * issued for MGMT_OP_READ_LOCAL_OOB_DATA.  When BR/EDR Secure
 * Connections is enabled and 256-bit data is available, the extended
 * reply (192- and 256-bit values) is used; otherwise the legacy reply
 * with only the 192-bit values.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (bredr_sc_enabled(hdev) && hash256 && rand256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.rand192, rand192, sizeof(rp.rand192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.rand256, rand256, sizeof(rp.rand256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			/* NOTE(review): hash192/rand192 are assumed
			 * non-NULL on success - confirm against callers.
			 */
			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.rand, rand192, sizeof(rp.rand));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6953 
6954 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
6955 {
6956 	int i;
6957 
6958 	for (i = 0; i < uuid_count; i++) {
6959 		if (!memcmp(uuid, uuids[i], 16))
6960 			return true;
6961 	}
6962 
6963 	return false;
6964 }
6965 
/* Scan an EIR/advertising data blob for service UUIDs and report whether
 * any of them occurs in the uuids filter list.  16- and 32-bit UUIDs are
 * expanded to full 128-bit form (starting from bluetooth_base_uuid)
 * before comparison.  A zero or overlong field length terminates
 * parsing.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		/* Field must fit completely within the remaining data */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUID fills bytes 12-13 of the template */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUID fills bytes 12-15 of the template */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* field_len does not include the length byte itself */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7020 
/* Emit a Device Found event for an inquiry result or LE advertising
 * report, after applying the active discovery filters: discovery must
 * be active (or passive LE reports requested), the RSSI threshold must
 * be met, and when a UUID filter list is set, either the EIR/advertising
 * data or the scan response must contain a matching UUID.
 *
 * eir/eir_len carry EIR or advertising data and scan_rsp/scan_rsp_len
 * carry LE scan response data; both may be empty.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;
	bool match;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* When using service discovery with a RSSI threshold, then check
	 * if such a RSSI threshold is specified. If a RSSI threshold has
	 * been specified, then all results with a RSSI smaller than the
	 * RSSI threshold will be dropped.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped. In case there is a match the result is
		 * kept and checking possible scan response data
		 * will be skipped.
		 */
		if (hdev->discovery.uuid_count > 0)
			match = eir_has_uuids(eir, eir_len,
					      hdev->discovery.uuid_count,
					      hdev->discovery.uuids);
		else
			match = true;

		if (!match && !scan_rsp_len)
			return;

		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty EIR or advertising data
		 * should be dropped since they do not match any UUID.
		 */
		if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
			return;

		match = false;
	}

	/* Append the class of device if the EIR blob did not carry one */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped if there is no previous match from the
		 * advertising data.
		 */
		if (hdev->discovery.uuid_count > 0) {
			if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
						     hdev->discovery.uuid_count,
						     hdev->discovery.uuids))
				return;
		}

		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty scan response and no
		 * previous matched advertising data should be dropped.
		 */
		if (hdev->discovery.uuid_count > 0 && !match)
			return;
	}

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7140 
7141 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7142 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7143 {
7144 	struct mgmt_ev_device_found *ev;
7145 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7146 	u16 eir_len;
7147 
7148 	ev = (struct mgmt_ev_device_found *) buf;
7149 
7150 	memset(buf, 0, sizeof(buf));
7151 
7152 	bacpy(&ev->addr.bdaddr, bdaddr);
7153 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
7154 	ev->rssi = rssi;
7155 
7156 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7157 				  name_len);
7158 
7159 	ev->eir_len = cpu_to_le16(eir_len);
7160 
7161 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7162 }
7163 
7164 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7165 {
7166 	struct mgmt_ev_discovering ev;
7167 
7168 	BT_DBG("%s discovering %u", hdev->name, discovering);
7169 
7170 	memset(&ev, 0, sizeof(ev));
7171 	ev.type = hdev->discovery.type;
7172 	ev.discovering = discovering;
7173 
7174 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7175 }
7176 
/* Request completion callback for mgmt_reenable_advertising(); only
 * logs the resulting status.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);
}
7181 
7182 void mgmt_reenable_advertising(struct hci_dev *hdev)
7183 {
7184 	struct hci_request req;
7185 
7186 	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7187 		return;
7188 
7189 	hci_req_init(&req, hdev);
7190 	enable_advertising(&req);
7191 	hci_req_run(&req, adv_enable_complete);
7192 }
7193