xref: /openbmc/linux/net/bluetooth/mgmt.c (revision 064dd929)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* All mgmt opcodes handled by this file.  Reported verbatim to trusted
 * sockets by MGMT_OP_READ_COMMANDS (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
137 
/* All mgmt events this file can emit.  Reported verbatim to trusted
 * sockets by MGMT_OP_READ_COMMANDS (see read_commands()).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184 
/* Read-only subset of mgmt opcodes that sockets without the
 * HCI_SOCK_TRUSTED flag are allowed to issue.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197 
/* Subset of mgmt events delivered to sockets without the
 * HCI_SOCK_TRUSTED flag.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212 
213 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
214 
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217 
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code, so entries must stay in
 * HCI status order.  Status codes beyond the end of the table map to
 * MGMT_STATUS_FAILED (see mgmt_status()).
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285 
mgmt_errno_status(int err)286 static u8 mgmt_errno_status(int err)
287 {
288 	switch (err) {
289 	case 0:
290 		return MGMT_STATUS_SUCCESS;
291 	case -EPERM:
292 		return MGMT_STATUS_REJECTED;
293 	case -EINVAL:
294 		return MGMT_STATUS_INVALID_PARAMS;
295 	case -EOPNOTSUPP:
296 		return MGMT_STATUS_NOT_SUPPORTED;
297 	case -EBUSY:
298 		return MGMT_STATUS_BUSY;
299 	case -ETIMEDOUT:
300 		return MGMT_STATUS_AUTH_FAILED;
301 	case -ENOMEM:
302 		return MGMT_STATUS_NO_RESOURCES;
303 	case -EISCONN:
304 		return MGMT_STATUS_ALREADY_CONNECTED;
305 	case -ENOTCONN:
306 		return MGMT_STATUS_DISCONNECTED;
307 	}
308 
309 	return MGMT_STATUS_FAILED;
310 }
311 
mgmt_status(int err)312 static u8 mgmt_status(int err)
313 {
314 	if (err < 0)
315 		return mgmt_errno_status(err);
316 
317 	if (err < ARRAY_SIZE(mgmt_status_table))
318 		return mgmt_status_table[err];
319 
320 	return MGMT_STATUS_FAILED;
321 }
322 
/* Broadcast an index-related event on the control channel to every
 * socket whose flags match @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329 
/* Broadcast an event on the control channel to sockets matching @flag,
 * excluding @skip_sk (typically the command's originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336 
/* Broadcast an event on the control channel to trusted sockets only,
 * excluding @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343 
/* As mgmt_event(), but for an event already packaged in an skb. */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349 
/* Translate a mgmt LE address type to the HCI LE address type.
 * Anything other than public is treated as a random address.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return mgmt_addr_type == BDADDR_LE_PUBLIC ? ADDR_LE_DEV_PUBLIC :
						    ADDR_LE_DEV_RANDOM;
}
357 
mgmt_fill_version_info(void * ver)358 void mgmt_fill_version_info(void *ver)
359 {
360 	struct mgmt_rp_read_version *rp = ver;
361 
362 	rp->version = MGMT_VERSION;
363 	rp->revision = cpu_to_le16(MGMT_REVISION);
364 }
365 
/* MGMT_OP_READ_VERSION handler: report the mgmt interface version and
 * revision.  Valid on the non-controller index (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378 
/* MGMT_OP_READ_COMMANDS handler: list the supported command opcodes
 * and event codes.  Untrusted sockets only see the read-only subset.
 *
 * The trusted and untrusted branches previously duplicated the two
 * fill loops verbatim; select the tables once and fill in one place.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *commands, *events;
	u16 num_commands, num_events;
	size_t rp_size;
	__le16 *opcode;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		commands = mgmt_commands;
		events = mgmt_events;
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		commands = mgmt_untrusted_commands;
		events = mgmt_untrusted_events;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	/* Reply layout: fixed header followed by all command opcodes and
	 * then all event codes, each as an unaligned little-endian u16.
	 */
	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(events[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
430 
/* MGMT_OP_READ_INDEX_LIST handler: report the ids of all configured
 * controllers.  The allocation happens under hci_dev_list_lock, hence
 * GFP_ATOMIC.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count = 0;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation size. */
	list_for_each_entry(d, &hci_dev_list, list)
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the ids, additionally skipping
	 * controllers that are still being set up, in config mode or
	 * exclusively claimed through the user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			continue;

		rp->index[count++] = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
488 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: report the ids of all
 * controllers that still require configuration.  Mirrors
 * read_index_list() with the configured/unconfigured test inverted.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count = 0;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation size. */
	list_for_each_entry(d, &hci_dev_list, list)
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the ids of unconfigured controllers,
	 * skipping ones in setup/config mode or claimed by the user
	 * channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			continue;

		rp->index[count++] = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: report all controllers with
 * their type (configured/unconfigured) and bus.  Also switches this
 * socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Upper bound for the allocation: the full list length.  The
	 * fill loop below may add fewer entries.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	/* Allocation under the read lock, hence GFP_ATOMIC. */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still in setup/config or claimed
		 * exclusively through the user channel.
		 */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		/* Entry type: 0x00 = configured, 0x01 = unconfigured. */
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	/* Reply sized to the entries actually filled, not the
	 * allocation upper bound.
	 */
	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
612 
is_configured(struct hci_dev * hdev)613 static bool is_configured(struct hci_dev *hdev)
614 {
615 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
616 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 		return false;
618 
619 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
620 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
621 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
622 		return false;
623 
624 	return true;
625 }
626 
get_missing_options(struct hci_dev * hdev)627 static __le32 get_missing_options(struct hci_dev *hdev)
628 {
629 	u32 options = 0;
630 
631 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
632 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
633 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
634 
635 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
636 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
637 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
638 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
639 
640 	return cpu_to_le32(options);
641 }
642 
/* Notify sockets subscribed to option events that the set of missing
 * configuration options changed; @skip is excluded from delivery.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
650 
/* Complete @opcode with the current missing-options bitmask as the
 * response payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
658 
/* MGMT_OP_READ_CONFIG_INFO handler: report the manufacturer id plus
 * the supported and still-missing configuration options.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* memset also clears any struct padding before the reply is
	 * copied out.
	 */
	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address can only be configured if the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
686 
get_supported_phys(struct hci_dev * hdev)687 static u32 get_supported_phys(struct hci_dev *hdev)
688 {
689 	u32 supported_phys = 0;
690 
691 	if (lmp_bredr_capable(hdev)) {
692 		supported_phys |= MGMT_PHY_BR_1M_1SLOT;
693 
694 		if (hdev->features[0][0] & LMP_3SLOT)
695 			supported_phys |= MGMT_PHY_BR_1M_3SLOT;
696 
697 		if (hdev->features[0][0] & LMP_5SLOT)
698 			supported_phys |= MGMT_PHY_BR_1M_5SLOT;
699 
700 		if (lmp_edr_2m_capable(hdev)) {
701 			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
702 
703 			if (lmp_edr_3slot_capable(hdev))
704 				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
705 
706 			if (lmp_edr_5slot_capable(hdev))
707 				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
708 
709 			if (lmp_edr_3m_capable(hdev)) {
710 				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
711 
712 				if (lmp_edr_3slot_capable(hdev))
713 					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
714 
715 				if (lmp_edr_5slot_capable(hdev))
716 					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
717 			}
718 		}
719 	}
720 
721 	if (lmp_le_capable(hdev)) {
722 		supported_phys |= MGMT_PHY_LE_1M_TX;
723 		supported_phys |= MGMT_PHY_LE_1M_RX;
724 
725 		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
726 			supported_phys |= MGMT_PHY_LE_2M_TX;
727 			supported_phys |= MGMT_PHY_LE_2M_RX;
728 		}
729 
730 		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
731 			supported_phys |= MGMT_PHY_LE_CODED_TX;
732 			supported_phys |= MGMT_PHY_LE_CODED_RX;
733 		}
734 	}
735 
736 	return supported_phys;
737 }
738 
get_selected_phys(struct hci_dev * hdev)739 static u32 get_selected_phys(struct hci_dev *hdev)
740 {
741 	u32 selected_phys = 0;
742 
743 	if (lmp_bredr_capable(hdev)) {
744 		selected_phys |= MGMT_PHY_BR_1M_1SLOT;
745 
746 		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
747 			selected_phys |= MGMT_PHY_BR_1M_3SLOT;
748 
749 		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
750 			selected_phys |= MGMT_PHY_BR_1M_5SLOT;
751 
752 		if (lmp_edr_2m_capable(hdev)) {
753 			if (!(hdev->pkt_type & HCI_2DH1))
754 				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
755 
756 			if (lmp_edr_3slot_capable(hdev) &&
757 			    !(hdev->pkt_type & HCI_2DH3))
758 				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
759 
760 			if (lmp_edr_5slot_capable(hdev) &&
761 			    !(hdev->pkt_type & HCI_2DH5))
762 				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
763 
764 			if (lmp_edr_3m_capable(hdev)) {
765 				if (!(hdev->pkt_type & HCI_3DH1))
766 					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
767 
768 				if (lmp_edr_3slot_capable(hdev) &&
769 				    !(hdev->pkt_type & HCI_3DH3))
770 					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
771 
772 				if (lmp_edr_5slot_capable(hdev) &&
773 				    !(hdev->pkt_type & HCI_3DH5))
774 					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
775 			}
776 		}
777 	}
778 
779 	if (lmp_le_capable(hdev)) {
780 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
781 			selected_phys |= MGMT_PHY_LE_1M_TX;
782 
783 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
784 			selected_phys |= MGMT_PHY_LE_1M_RX;
785 
786 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
787 			selected_phys |= MGMT_PHY_LE_2M_TX;
788 
789 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
790 			selected_phys |= MGMT_PHY_LE_2M_RX;
791 
792 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
793 			selected_phys |= MGMT_PHY_LE_CODED_TX;
794 
795 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
796 			selected_phys |= MGMT_PHY_LE_CODED_RX;
797 	}
798 
799 	return selected_phys;
800 }
801 
get_configurable_phys(struct hci_dev * hdev)802 static u32 get_configurable_phys(struct hci_dev *hdev)
803 {
804 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
805 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
806 }
807 
get_supported_settings(struct hci_dev * hdev)808 static u32 get_supported_settings(struct hci_dev *hdev)
809 {
810 	u32 settings = 0;
811 
812 	settings |= MGMT_SETTING_POWERED;
813 	settings |= MGMT_SETTING_BONDABLE;
814 	settings |= MGMT_SETTING_DEBUG_KEYS;
815 	settings |= MGMT_SETTING_CONNECTABLE;
816 	settings |= MGMT_SETTING_DISCOVERABLE;
817 
818 	if (lmp_bredr_capable(hdev)) {
819 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
820 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
821 		settings |= MGMT_SETTING_BREDR;
822 		settings |= MGMT_SETTING_LINK_SECURITY;
823 
824 		if (lmp_ssp_capable(hdev)) {
825 			settings |= MGMT_SETTING_SSP;
826 		}
827 
828 		if (lmp_sc_capable(hdev))
829 			settings |= MGMT_SETTING_SECURE_CONN;
830 
831 		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
832 			     &hdev->quirks))
833 			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
834 	}
835 
836 	if (lmp_le_capable(hdev)) {
837 		settings |= MGMT_SETTING_LE;
838 		settings |= MGMT_SETTING_SECURE_CONN;
839 		settings |= MGMT_SETTING_PRIVACY;
840 		settings |= MGMT_SETTING_STATIC_ADDRESS;
841 		settings |= MGMT_SETTING_ADVERTISING;
842 	}
843 
844 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
845 	    hdev->set_bdaddr)
846 		settings |= MGMT_SETTING_CONFIGURATION;
847 
848 	if (cis_central_capable(hdev))
849 		settings |= MGMT_SETTING_CIS_CENTRAL;
850 
851 	if (cis_peripheral_capable(hdev))
852 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
853 
854 	settings |= MGMT_SETTING_PHY_CONFIGURATION;
855 
856 	return settings;
857 }
858 
get_current_settings(struct hci_dev * hdev)859 static u32 get_current_settings(struct hci_dev *hdev)
860 {
861 	u32 settings = 0;
862 
863 	if (hdev_is_powered(hdev))
864 		settings |= MGMT_SETTING_POWERED;
865 
866 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
867 		settings |= MGMT_SETTING_CONNECTABLE;
868 
869 	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
870 		settings |= MGMT_SETTING_FAST_CONNECTABLE;
871 
872 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
873 		settings |= MGMT_SETTING_DISCOVERABLE;
874 
875 	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
876 		settings |= MGMT_SETTING_BONDABLE;
877 
878 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
879 		settings |= MGMT_SETTING_BREDR;
880 
881 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
882 		settings |= MGMT_SETTING_LE;
883 
884 	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
885 		settings |= MGMT_SETTING_LINK_SECURITY;
886 
887 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
888 		settings |= MGMT_SETTING_SSP;
889 
890 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
891 		settings |= MGMT_SETTING_ADVERTISING;
892 
893 	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
894 		settings |= MGMT_SETTING_SECURE_CONN;
895 
896 	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
897 		settings |= MGMT_SETTING_DEBUG_KEYS;
898 
899 	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
900 		settings |= MGMT_SETTING_PRIVACY;
901 
902 	/* The current setting for static address has two purposes. The
903 	 * first is to indicate if the static address will be used and
904 	 * the second is to indicate if it is actually set.
905 	 *
906 	 * This means if the static address is not configured, this flag
907 	 * will never be set. If the address is configured, then if the
908 	 * address is actually used decides if the flag is set or not.
909 	 *
910 	 * For single mode LE only controllers and dual-mode controllers
911 	 * with BR/EDR disabled, the existence of the static address will
912 	 * be evaluated.
913 	 */
914 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
915 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
916 	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
917 		if (bacmp(&hdev->static_addr, BDADDR_ANY))
918 			settings |= MGMT_SETTING_STATIC_ADDRESS;
919 	}
920 
921 	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
922 		settings |= MGMT_SETTING_WIDEBAND_SPEECH;
923 
924 	if (cis_central_capable(hdev))
925 		settings |= MGMT_SETTING_CIS_CENTRAL;
926 
927 	if (cis_peripheral_capable(hdev))
928 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
929 
930 	if (bis_capable(hdev))
931 		settings |= MGMT_SETTING_ISO_BROADCASTER;
932 
933 	if (sync_recv_capable(hdev))
934 		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
935 
936 	return settings;
937 }
938 
/* Look up a pending mgmt command for @opcode on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
943 
mgmt_get_adv_discov_flags(struct hci_dev * hdev)944 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
945 {
946 	struct mgmt_pending_cmd *cmd;
947 
948 	/* If there's a pending mgmt command the flags will not yet have
949 	 * their final values, so check for this first.
950 	 */
951 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
952 	if (cmd) {
953 		struct mgmt_mode *cp = cmd->param;
954 		if (cp->val == 0x01)
955 			return LE_AD_GENERAL;
956 		else if (cp->val == 0x02)
957 			return LE_AD_LIMITED;
958 	} else {
959 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
960 			return LE_AD_LIMITED;
961 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
962 			return LE_AD_GENERAL;
963 	}
964 
965 	return 0;
966 }
967 
mgmt_get_connectable(struct hci_dev * hdev)968 bool mgmt_get_connectable(struct hci_dev *hdev)
969 {
970 	struct mgmt_pending_cmd *cmd;
971 
972 	/* If there's a pending mgmt command the flag will not yet have
973 	 * it's final value, so check for this first.
974 	 */
975 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
976 	if (cmd) {
977 		struct mgmt_mode *cp = cmd->param;
978 
979 		return cp->val;
980 	}
981 
982 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
983 }
984 
/* Sync-queue callback: push the cached EIR data and class of device
 * out to the controller after the service cache timer has fired.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
992 
service_cache_off(struct work_struct * work)993 static void service_cache_off(struct work_struct *work)
994 {
995 	struct hci_dev *hdev = container_of(work, struct hci_dev,
996 					    service_cache.work);
997 
998 	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
999 		return;
1000 
1001 	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1002 }
1003 
rpa_expired_sync(struct hci_dev * hdev,void * data)1004 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1005 {
1006 	/* The generation of a new RPA and programming it into the
1007 	 * controller happens in the hci_req_enable_advertising()
1008 	 * function.
1009 	 */
1010 	if (ext_adv_capable(hdev))
1011 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1012 	else
1013 		return hci_enable_advertising_sync(hdev);
1014 }
1015 
rpa_expired(struct work_struct * work)1016 static void rpa_expired(struct work_struct *work)
1017 {
1018 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1019 					    rpa_expired.work);
1020 
1021 	bt_dev_dbg(hdev, "");
1022 
1023 	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1024 
1025 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1026 		return;
1027 
1028 	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1029 }
1030 
1031 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1032 
/* Delayed work: the discoverable timeout expired. */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	/* Push the now non-discoverable state to the controller */
	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	/* Broadcast the settings change to mgmt listeners */
	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1057 
1058 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1059 
/* Finish a mesh transmit: emit the completion event (unless @silent)
 * and drop the tracking entry.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	if (!silent) {
		u8 handle = mesh_tx->handle;

		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);
	}

	mgmt_mesh_remove(mesh_tx);
}
1071 
/* Sync-queue callback run when the mesh send window ends: stop
 * advertising and complete the transmit that was in flight.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	/* NULL instance selects the first queued mesh transmit */
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1085 
1086 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1087 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion callback: start the next queued mesh transmit, if any.
 * If queueing fails the transmit is completed (and removed) right
 * away; otherwise mark the controller as busy sending. The incoming
 * @err is intentionally ignored and reused as a local.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1103 
mesh_send_done(struct work_struct * work)1104 static void mesh_send_done(struct work_struct *work)
1105 {
1106 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1107 					    mesh_send_done.work);
1108 
1109 	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1110 		return;
1111 
1112 	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1113 }
1114 
/* One-time mgmt initialization of @hdev, performed when the first
 * management command arrives on @sk. Subsequent calls are no-ops
 * thanks to the HCI_MGMT flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1136 
/* MGMT_OP_READ_INFO: report the controller's address, version,
 * manufacturer, class, names and its supported/current settings.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* Settings bitmasks are wire-format little endian */
	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1166 
append_eir_data_to_buf(struct hci_dev * hdev,u8 * eir)1167 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1168 {
1169 	u16 eir_len = 0;
1170 	size_t name_len;
1171 
1172 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1173 		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1174 					  hdev->dev_class, 3);
1175 
1176 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1177 		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1178 					  hdev->appearance);
1179 
1180 	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1181 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1182 				  hdev->dev_name, name_len);
1183 
1184 	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1185 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1186 				  hdev->short_name, name_len);
1187 
1188 	return eir_len;
1189 }
1190 
/* MGMT_OP_READ_EXT_INFO: like Read Info but with class, appearance and
 * names packed as EIR data. Calling it switches the socket over to the
 * extended info event and disables the legacy class/name events.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Fixed-size scratch buffer; the reply struct plus its variable
	 * length EIR tail are built in place.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1230 
/* Broadcast an Extended Info Changed event carrying the current EIR
 * data to every socket that opted in via Read Ext Info, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1246 
/* Complete @opcode on @sk with the current settings bitmask. */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings;

	settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1254 
/* Notify mgmt sockets (except @sk) that an advertising instance was
 * added.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1263 
/* Notify mgmt sockets (except @sk) that an advertising instance was
 * removed.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1273 
cancel_adv_timeout(struct hci_dev * hdev)1274 static void cancel_adv_timeout(struct hci_dev *hdev)
1275 {
1276 	if (hdev->adv_instance_timeout) {
1277 		hdev->adv_instance_timeout = 0;
1278 		cancel_delayed_work(&hdev->adv_instance_expire);
1279 	}
1280 }
1281 
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Re-populate the pending connect/report lists from each known
	 * LE peer's stored auto-connect policy.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1306 
new_settings(struct hci_dev * hdev,struct sock * skip)1307 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1308 {
1309 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1310 
1311 	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1312 				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1313 }
1314 
/* Completion handler for the queued MGMT_OP_SET_POWERED work. */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* Powered on: re-arm LE auto-connect actions and
			 * update passive scanning accordingly.
			 */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1350 
set_powered_sync(struct hci_dev * hdev,void * data)1351 static int set_powered_sync(struct hci_dev *hdev, void *data)
1352 {
1353 	struct mgmt_pending_cmd *cmd = data;
1354 	struct mgmt_mode *cp = cmd->param;
1355 
1356 	BT_DBG("%s", hdev->name);
1357 
1358 	return hci_set_powered_sync(hdev, cp->val);
1359 }
1360 
/* MGMT_OP_SET_POWERED: power the controller up or down. The state
 * change runs through the cmd_sync machinery; the reply is sent from
 * mgmt_set_powered_complete().
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x00 (off) and 0x01 (on) are valid modes */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered command may be outstanding at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings back */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1411 
/* Broadcast a New Settings event to every mgmt socket. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1416 
/* State shared by the mgmt_pending_foreach() response helpers below. */
struct cmd_lookup {
	struct sock *sk;	/* first responding socket (reference held) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1422 
/* mgmt_pending_foreach() helper: answer @cmd with the current settings,
 * unlink and free it, and remember the first socket seen (taking a
 * reference) so the caller can skip it when broadcasting New Settings.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	/* free directly; the entry was already unlinked above */
	mgmt_pending_free(cmd);
}
1438 
cmd_status_rsp(struct mgmt_pending_cmd * cmd,void * data)1439 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1440 {
1441 	u8 *status = data;
1442 
1443 	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1444 	mgmt_pending_remove(cmd);
1445 }
1446 
cmd_complete_rsp(struct mgmt_pending_cmd * cmd,void * data)1447 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1448 {
1449 	if (cmd->cmd_complete) {
1450 		u8 *status = data;
1451 
1452 		cmd->cmd_complete(cmd, *status);
1453 		mgmt_pending_remove(cmd);
1454 
1455 		return;
1456 	}
1457 
1458 	cmd_status_rsp(cmd, data);
1459 }
1460 
/* Complete @cmd echoing its own parameters back as the response. */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	u16 len = cmd->param_len;

	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, len);
}
1466 
/* Complete @cmd echoing back only the leading mgmt_addr_info. */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	u16 len = sizeof(struct mgmt_addr_info);

	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, len);
}
1472 
mgmt_bredr_support(struct hci_dev * hdev)1473 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1474 {
1475 	if (!lmp_bredr_capable(hdev))
1476 		return MGMT_STATUS_NOT_SUPPORTED;
1477 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1478 		return MGMT_STATUS_REJECTED;
1479 	else
1480 		return MGMT_STATUS_SUCCESS;
1481 }
1482 
mgmt_le_support(struct hci_dev * hdev)1483 static u8 mgmt_le_support(struct hci_dev *hdev)
1484 {
1485 	if (!lmp_le_capable(hdev))
1486 		return MGMT_STATUS_NOT_SUPPORTED;
1487 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1488 		return MGMT_STATUS_REJECTED;
1489 	else
1490 		return MGMT_STATUS_SUCCESS;
1491 }
1492 
/* Completion handler for the queued MGMT_OP_SET_DISCOVERABLE work. */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Undo the optimistic flag change made in set_discoverable() */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the discoverable timeout now that the mode took effect */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1526 
/* Sync-queue callback: push the discoverable state to the controller. */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1533 
/* MGMT_OP_SET_DISCOVERABLE: set general (0x01), limited (0x02) or no
 * (0x00) discoverable mode, with an optional timeout. Requires the
 * controller to be connectable; the completion handler arms the
 * discoverable timeout and sends the reply.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Flags are set optimistically here; the error path of the
	 * completion handler reverts the limited flag.
	 */
	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1666 
mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1667 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1668 					  int err)
1669 {
1670 	struct mgmt_pending_cmd *cmd = data;
1671 
1672 	bt_dev_dbg(hdev, "err %d", err);
1673 
1674 	/* Make sure cmd still outstanding. */
1675 	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1676 		return;
1677 
1678 	hci_dev_lock(hdev);
1679 
1680 	if (err) {
1681 		u8 mgmt_err = mgmt_status(err);
1682 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1683 		goto done;
1684 	}
1685 
1686 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1687 	new_settings(hdev, cmd->sk);
1688 
1689 done:
1690 	if (cmd)
1691 		mgmt_pending_remove(cmd);
1692 
1693 	hci_dev_unlock(hdev);
1694 }
1695 
/* Apply a Set Connectable request while the controller is powered off:
 * only the flags change, no HCI traffic is generated. Clearing
 * connectable also clears discoverable.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		/* Refresh scanning state to match the new flags */
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1724 
/* Sync-queue callback: push the connectable state to the controller. */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1731 
/* MGMT_OP_SET_CONNECTABLE: enable or disable connectable mode. When
 * powered off only the stored flags are updated; otherwise the change
 * is queued and the reply sent from mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also ends discoverable mode, so
		 * disarm its timeout and clear both discoverable flags.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1791 
/* MGMT_OP_SET_BONDABLE: toggle the bondable flag. This is a pure flag
 * change; no pending command is needed.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear tells us whether the flag actually flipped */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1829 
/* MGMT_OP_SET_LINK_SECURITY: enable or disable BR/EDR link level
 * security (authentication). When powered, this issues the HCI Write
 * Auth Enable command; the reply is sent from the command's event
 * handling elsewhere in mgmt.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		/* Powered off: just record the desired state in the flag */
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no HCI needed */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1898 
/* Completion handler for the queued MGMT_OP_SET_SSP work. On error
 * any optimistic flag change is rolled back and all pending Set SSP
 * commands are failed; on success all of them get a settings reply.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Revert the optimistic enable and announce the revert */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	/* Reply to every pending Set SSP command with the new settings */
	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1940 
/* Sync-queue callback: write the SSP mode to the controller. The flag
 * is set up front while the command runs and, if this call set it,
 * cleared again on success — apparently so that set_ssp_complete()'s
 * own test_and_set can detect and report the change (NOTE(review):
 * confirm against set_ssp_complete() before relying on this).
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1958 
/* MGMT_OP_SET_SSP: enable or disable Secure Simple Pairing. When
 * powered off only the flag is recorded; otherwise the mode change is
 * queued and answered from set_ssp_complete().
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		/* Powered off: record the desired state in the flag only */
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings back */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2033 
/* Handler for MGMT_OP_SET_HS: High Speed support has been removed, so
 * the command is always rejected as not supported.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	u8 status = MGMT_STATUS_NOT_SUPPORTED;

	bt_dev_dbg(hdev, "sock %p", sk);

	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
}
2041 
set_le_complete(struct hci_dev * hdev,void * data,int err)2042 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2043 {
2044 	struct cmd_lookup match = { NULL, hdev };
2045 	u8 status = mgmt_status(err);
2046 
2047 	bt_dev_dbg(hdev, "err %d", err);
2048 
2049 	if (status) {
2050 		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2051 							&status);
2052 		return;
2053 	}
2054 
2055 	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2056 
2057 	new_settings(hdev, match.sk);
2058 
2059 	if (match.sk)
2060 		sock_put(match.sk);
2061 }
2062 
/* cmd_sync work for MGMT_OP_SET_LE: enable or disable LE host support.
 *
 * When disabling, all advertising (instances, legacy and extended) is
 * torn down first; when enabling, HCI_LE_ENABLED is set up front so the
 * follow-up advertising-data updates below run.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Stop and remove advertising before turning LE off */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2106 
/* Completion handler for Set Mesh Receiver: on failure answer all
 * pending SET_MESH_RECEIVER commands with the error status, otherwise
 * complete the originating command with success.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		/* NOTE(review): relies on cmd_status_rsp() also removing
		 * each pending command (including 'cmd') - confirm.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Remove before replying so the command is no longer "pending" */
	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2122 
/* cmd_sync work for MGMT_OP_SET_MESH_RECEIVER: toggle the HCI_MESH
 * flag, capture the caller-supplied AD-type filter list and refresh
 * passive scanning to match.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	/* Bytes following the fixed header are the AD-type filter list.
	 * NOTE(review): assumes param_len >= sizeof(*cp), presumably
	 * guaranteed by the mgmt command-size validation - confirm.
	 */
	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2145 
/* Handler for MGMT_OP_SET_MESH_RECEIVER: validate support and the
 * enable parameter, then queue set_mesh_sync() in cmd_sync context.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Mesh requires LE and the experimental mesh feature enabled */
	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean values are valid */
	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2183 
/* Called when advertising for a mesh transmission has been set up.
 * On failure the transmission is completed with an error; on success
 * delayed work is scheduled to end the transmission after the
 * requested number of advertising events.
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	/* 25 ms budget per requested advertising event.
	 * NOTE(review): the 25 ms constant is an assumption about the
	 * advertising interval - confirm against mesh_send_done.
	 */
	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2204 
/* cmd_sync work for MGMT_OP_MESH_SEND: create a dedicated advertising
 * instance carrying the mesh payload and schedule it.
 *
 * The instance number le_num_of_adv_sets + 1 is used, i.e. one past the
 * controller's advertising sets - presumably a software-only slot
 * reserved for mesh (TODO confirm).
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* All controller advertising sets already in use */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	/* instance == 0 means nothing needs scheduling here */
	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2258 
send_count(struct mgmt_mesh_tx * mesh_tx,void * data)2259 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2260 {
2261 	struct mgmt_rp_mesh_read_features *rp = data;
2262 
2263 	if (rp->used_handles >= rp->max_handles)
2264 		return;
2265 
2266 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2267 }
2268 
/* Handler for MGMT_OP_MESH_READ_FEATURES: report the maximum number of
 * mesh TX handles and the handles currently in use by this socket.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* max_handles stays 0 (nothing available) while LE is disabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply so only the used handle slots are sent */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2295 
/* cmd_sync work for MGMT_OP_MESH_SEND_CANCEL: cancel one mesh
 * transmission (by handle) or, when handle is 0, every transmission
 * owned by the requesting socket.
 */
static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		/* Handle 0: drain all of this socket's transmissions */
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		/* Only the owning socket may cancel a transmission */
		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}
2322 
/* Handler for MGMT_OP_MESH_SEND_CANCEL: validate support and LE state,
 * then queue send_cancel() in cmd_sync context. Uses mgmt_pending_new()
 * (not _add) since send_cancel() completes and frees the command itself.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2356 
/* Handler for MGMT_OP_MESH_SEND: queue a mesh packet for transmission
 * via advertising. The reply carries the 1-byte TX handle on success.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* Payload must be 1..31 bytes (legacy advertising data limit) */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Count this socket's outstanding transmissions to enforce the
	 * per-socket handle limit.
	 */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	/* If a transmission is already in flight, just queue this one;
	 * mesh_send_done will pick it up later.
	 */
	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		/* NOTE(review): mesh_tx is only removed here when a send
		 * was already in progress; in the !sending failure case
		 * cleanup presumably happens elsewhere - confirm.
		 */
		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2417 
/* Handler for MGMT_OP_SET_LE: enable or disable Low Energy support.
 *
 * Powered-off (or no-op) requests are handled purely via flags; active
 * changes are queued to set_le_sync() in cmd_sync context.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean values are valid */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* While powered off, or when the controller already matches the
	 * requested state, only the flags need adjusting.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE implies disabling advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* SET_ADVERTISING also touches LE state, so it conflicts too */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2506 
2507 /* This is a helper function to test for pending mgmt commands that can
2508  * cause CoD or EIR HCI commands. We can only allow one such pending
2509  * mgmt command at a time since otherwise we cannot easily track what
2510  * the current values are, will be, and based on that calculate if a new
2511  * HCI command needs to be sent and if yes with what value.
2512  */
pending_eir_or_class(struct hci_dev * hdev)2513 static bool pending_eir_or_class(struct hci_dev *hdev)
2514 {
2515 	struct mgmt_pending_cmd *cmd;
2516 
2517 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2518 		switch (cmd->opcode) {
2519 		case MGMT_OP_ADD_UUID:
2520 		case MGMT_OP_REMOVE_UUID:
2521 		case MGMT_OP_SET_DEV_CLASS:
2522 		case MGMT_OP_SET_POWERED:
2523 			return true;
2524 		}
2525 	}
2526 
2527 	return false;
2528 }
2529 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs are aliases of this
 * base differing only in bytes 12-15.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2534 
get_uuid_size(const u8 * uuid)2535 static u8 get_uuid_size(const u8 *uuid)
2536 {
2537 	u32 val;
2538 
2539 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2540 		return 128;
2541 
2542 	val = get_unaligned_le32(&uuid[12]);
2543 	if (val > 0xffff)
2544 		return 32;
2545 
2546 	return 16;
2547 }
2548 
/* Common completion handler for Add/Remove UUID and Set Device Class:
 * reply to the originating command with the (possibly updated) Class
 * of Device and release the pending command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Class of Device is always 3 bytes */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2560 
/* Push the updated Class of Device and EIR data to the controller
 * after a UUID has been added.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2571 
/* Handler for MGMT_OP_ADD_UUID: record the service UUID, then push the
 * resulting Class of Device and EIR data to the controller.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Serialize against other commands that touch CoD/EIR */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2621 
enable_service_cache(struct hci_dev * hdev)2622 static bool enable_service_cache(struct hci_dev *hdev)
2623 {
2624 	if (!hdev_is_powered(hdev))
2625 		return false;
2626 
2627 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2628 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2629 				   CACHE_TIMEOUT);
2630 		return true;
2631 	}
2632 
2633 	return false;
2634 }
2635 
/* Refresh Class of Device and EIR data on the controller after a UUID
 * (or all UUIDs) has been removed.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (!err)
		err = hci_update_eir_sync(hdev);

	return err;
}
2646 
/* Handler for MGMT_OP_REMOVE_UUID: remove one service UUID (or all of
 * them when the all-zero UUID is given), then refresh CoD and EIR.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Serialize against other commands that touch CoD/EIR */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* With the service cache armed the controller update is
		 * deferred; reply immediately with the current class.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every stored entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2717 
set_class_sync(struct hci_dev * hdev,void * data)2718 static int set_class_sync(struct hci_dev *hdev, void *data)
2719 {
2720 	int err = 0;
2721 
2722 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2723 		cancel_delayed_work_sync(&hdev->service_cache);
2724 		err = hci_update_eir_sync(hdev);
2725 	}
2726 
2727 	if (err)
2728 		return err;
2729 
2730 	return hci_update_class_sync(hdev);
2731 }
2732 
/* Handler for MGMT_OP_SET_DEV_CLASS: set the major/minor device class
 * and program the resulting Class of Device into the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Class of Device is a BR/EDR concept */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Serialize against other commands that touch CoD/EIR */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low bits of minor and high bits of major are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* While powered off only record the values; the controller gets
	 * programmed during power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2787 
/* Handler for MGMT_OP_LOAD_LINK_KEYS: replace the stored BR/EDR link
 * keys with the caller-supplied list and update the debug-keys policy.
 *
 * All input (count, total length, key types) is validated before any
 * state is modified.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared key count must match the actual payload size */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Reject any key with an out-of-range type before touching the
	 * key store.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
		if (key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Keys on the administrative block list are never loaded */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2877 
/* Emit a Device Unpaired event for the given address, skipping the
 * socket that triggered the unpair operation.
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev = {
		.addr.type = addr_type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2889 
/* Completion of the disconnect issued by Unpair Device: on success
 * notify listeners that the device is unpaired, then complete the
 * originating command via its cmd_complete handler.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	/* NOTE(review): err is forwarded as-is as the command status;
	 * mapping of negative errno to a mgmt status presumably happens
	 * in the cmd_complete handler - confirm.
	 */
	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
2901 
unpair_device_sync(struct hci_dev * hdev,void * data)2902 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2903 {
2904 	struct mgmt_pending_cmd *cmd = data;
2905 	struct mgmt_cp_unpair_device *cp = cmd->param;
2906 	struct hci_conn *conn;
2907 
2908 	if (cp->addr.type == BDADDR_BREDR)
2909 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2910 					       &cp->addr.bdaddr);
2911 	else
2912 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2913 					       le_addr_type(cp->addr.type));
2914 
2915 	if (!conn)
2916 		return 0;
2917 
2918 	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2919 }
2920 
/* Handler for MGMT_OP_UNPAIR_DEVICE: remove all pairing material for a
 * device and optionally terminate any existing link to it.
 *
 * When no disconnect is needed the command completes immediately;
 * otherwise the termination runs in cmd_sync context and the reply is
 * sent from unpair_device_complete().
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean parameter */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: the stored parameters can go right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3049 
/* MGMT_OP_DISCONNECT handler: terminate the link to the given peer
 * address. The reply is deferred through a pending command and is sent
 * once the HCI disconnect procedure completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the peer address back in every reply, success or error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be pending per controller */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	/* Look up the connection in the hash that matches the address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED means no established link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3115 
/* Map an HCI link type and address type pair to the BDADDR_* address
 * type exposed over the management interface.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	if (link_type == ISO_LINK || link_type == LE_LINK) {
		if (addr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;

		/* Fallback to LE Random address type */
		return BDADDR_LE_RANDOM;
	}

	/* Fallback to BR/EDR type */
	return BDADDR_BREDR;
}
3135 
/* MGMT_OP_GET_CONNECTIONS handler: reply with the list of currently
 * connected peers. SCO/eSCO links are not reported; the reply length
 * is recalculated to account for any filtered entries.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* Count connections under the dev lock so the list cannot change
	 * before the reply is filled in below. This is an upper bound;
	 * SCO/eSCO links counted here are filtered out while copying.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		/* Skip SCO/eSCO links before touching the reply entry,
		 * rather than copying data that would only be overwritten
		 * by the next connection or dropped from the reply.
		 */
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3189 
/* Queue a HCI PIN Code Negative Reply for the given address and track
 * it as a pending MGMT_OP_PIN_CODE_NEG_REPLY command. Caller must hold
 * hdev lock (all callers in this file do).
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command carries only the peer's bdaddr */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
3210 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a userspace-provided PIN code
 * to the controller. If high security was requested but the PIN is not
 * the full 16 bytes, a negative reply is sent instead and the command
 * fails with Invalid Params.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing only applies to BR/EDR ACL links */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-digit PIN; reject anything
	 * shorter by sending a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3272 
/* MGMT_OP_SET_IO_CAPABILITY handler: record the IO capability that will
 * be advertised during SSP/SMP pairing.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Values beyond KeyboardDisplay are undefined by the spec */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = cp->io_capability;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
3295 
find_pairing(struct hci_conn * conn)3296 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3297 {
3298 	struct hci_dev *hdev = conn->hdev;
3299 	struct mgmt_pending_cmd *cmd;
3300 
3301 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3302 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3303 			continue;
3304 
3305 		if (cmd->user_data != conn)
3306 			continue;
3307 
3308 		return cmd;
3309 	}
3310 
3311 	return NULL;
3312 }
3313 
/* Resolve a pending Pair Device command: send the reply, detach the
 * pairing callbacks from the connection and release the references the
 * command held on it.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drop the usage count taken when the connection was created */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Release the reference taken in pair_device() via hci_conn_get() */
	hci_conn_put(conn);

	return err;
}
3342 
/* Called when an SMP procedure ends; resolve any pending Pair Device
 * command for the connection with success or failure accordingly.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
3354 
/* BR/EDR connect/security/disconnect callback: any event terminates the
 * pending pairing with the (mapped) HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3370 
/* LE connection callback: a successful event is not proof the pairing
 * finished (SMP completes later via mgmt_smp_complete), so only errors
 * terminate the pending command here.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3389 
/* MGMT_OP_PAIR_DEVICE handler: establish a connection to the peer and
 * initiate pairing over it. The reply is deferred until the pairing
 * callbacks (or mgmt_smp_complete for LE) resolve the pending command.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the peer address back in every reply, success or error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	/* Map the connect error to the closest mgmt status code */
	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means someone else is already using
	 * this connection for pairing; drop the extra usage count taken
	 * by the connect call above.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released in pairing_complete() via hci_conn_put() */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure enough, the pairing is
	 * effectively done; complete the command immediately.
	 */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3524 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the in-flight Pair Device
 * command for the given address, remove any keys created so far and
 * tear down the link if it was created for the pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same peer the pairing is for */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3581 
/* Common backend for the user pairing response commands (PIN, User
 * Confirm, Passkey and their negative variants). LE responses are
 * handed to SMP and answered immediately; BR/EDR responses are sent as
 * the given HCI command and answered when it completes.
 *
 * @mgmt_op: mgmt opcode used in the reply
 * @hci_op:  HCI opcode to send for BR/EDR links
 * @passkey: only used when @hci_op is HCI_OP_USER_PASSKEY_REPLY
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go through SMP, not HCI */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3652 
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp(); a negative reply carries no passkey.
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;
	struct mgmt_addr_info *addr = &cp->addr;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, addr, MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3664 
/* MGMT_OP_USER_CONFIRM_REPLY handler: validate the fixed-size command
 * and forward it to the common user pairing response path.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;
	struct mgmt_addr_info *addr = &cp->addr;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, addr, MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3680 
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp(); a negative reply carries no passkey.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;
	struct mgmt_addr_info *addr = &cp->addr;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3692 
/* MGMT_OP_USER_PASSKEY_REPLY handler: thin wrapper around
 * user_pairing_resp(), passing the user-entered passkey through.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;
	struct mgmt_addr_info *addr = &cp->addr;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, addr, MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3704 
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp(); a negative reply carries no passkey.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;
	struct mgmt_addr_info *addr = &cp->addr;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3716 
/* If the current advertising instance carries any of the given flags,
 * expire it early and schedule the next instance (if any).
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* Nothing to do when there is no current instance or it does not
	 * carry any of the flags that require a refresh.
	 */
	if (!adv || !(adv->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv = hci_get_next_instance(hdev, adv->instance);
	if (adv)
		hci_schedule_adv_instance_sync(hdev, adv->instance, true);

	return 0;
}
3739 
/* hci_cmd_sync callback: expire the current advertising instance if it
 * includes the local name, so the new name gets advertised.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3744 
/* Completion callback for set_name_sync: send the deferred reply for
 * MGMT_OP_SET_LOCAL_NAME and refresh advertising if it is active.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the command was already resolved elsewhere */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* Name may be part of the advertising data; refresh it */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3769 
/* hci_cmd_sync work: push the new local name to the controller for
 * BR/EDR (name + EIR) and to the LE scan response data if advertising.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3785 
/* MGMT_OP_SET_LOCAL_NAME handler: update the controller's friendly and
 * short names. When unpowered only the cached copies are updated; when
 * powered the controller update is queued and the reply deferred.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is never sent to the controller, only cached */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		/* Notify listeners even though no HCI traffic happened */
		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Cache the new name; set_name_complete() sends the reply */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3848 
/* hci_cmd_sync callback: expire the current advertising instance if it
 * includes the appearance value, so the new appearance is advertised.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3853 
/* MGMT_OP_SET_APPEARANCE handler: update the GAP appearance value used
 * in LE advertising; a no-op when the value is unchanged.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Appearance is an LE-only concept */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (appearance != hdev->appearance) {
		hdev->appearance = appearance;

		/* Refresh the active advertising instance if it carries
		 * the appearance value.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3888 
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report the supported, selected
 * and configurable PHYs of the controller.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));

	/* Snapshot the PHY state under the dev lock for a consistent view */
	hci_dev_lock(hdev);
	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3909 
mgmt_phy_configuration_changed(struct hci_dev * hdev,struct sock * skip)3910 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3911 {
3912 	struct mgmt_ev_phy_configuration_changed ev;
3913 
3914 	memset(&ev, 0, sizeof(ev));
3915 
3916 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3917 
3918 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3919 			  sizeof(ev), skip);
3920 }
3921 
/* Completion callback for set_default_phy_sync: extract the controller
 * status from the synchronous command response skb, send the deferred
 * reply and broadcast the change on success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if the command was already resolved elsewhere */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* err only covers queueing; the controller status (or the skb
	 * error) decides the final outcome.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3958 
set_default_phy_sync(struct hci_dev * hdev,void * data)3959 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3960 {
3961 	struct mgmt_pending_cmd *cmd = data;
3962 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3963 	struct hci_cp_le_set_default_phy cp_phy;
3964 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3965 
3966 	memset(&cp_phy, 0, sizeof(cp_phy));
3967 
3968 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3969 		cp_phy.all_phys |= 0x01;
3970 
3971 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3972 		cp_phy.all_phys |= 0x02;
3973 
3974 	if (selected_phys & MGMT_PHY_LE_1M_TX)
3975 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3976 
3977 	if (selected_phys & MGMT_PHY_LE_2M_TX)
3978 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3979 
3980 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
3981 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3982 
3983 	if (selected_phys & MGMT_PHY_LE_1M_RX)
3984 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3985 
3986 	if (selected_phys & MGMT_PHY_LE_2M_RX)
3987 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3988 
3989 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
3990 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3991 
3992 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3993 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
3994 
3995 	return 0;
3996 }
3997 
/* MGMT_OP_SET_PHY_CONFIGURATION handler.
 *
 * Validates the requested PHY selection against what the controller
 * supports, applies the BR/EDR part immediately by rewriting
 * hdev->pkt_type, and, when the LE part changed, queues
 * set_default_phy_sync() to program the controller asynchronously.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any PHY bit the controller does not support at all. */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* PHYs that are supported but not configurable must always remain
	 * selected; a request that tries to clear one of them is invalid.
	 */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do when the selection matches the current state. */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Only one Set PHY Configuration operation may be in flight. */
	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate the BR/EDR PHY selection into the ACL packet-type
	 * mask.  Note the EDR (2M/3M) bits have inverted polarity: in the
	 * HCI packet-type field a set 2DHx/3DHx bit means "shall NOT be
	 * used", so those bits are set when the PHY is NOT selected.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	/* Remember whether the BR/EDR side actually changed. */
	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE selection is unchanged no HCI command is needed; just
	 * report the BR/EDR change (if any) and complete immediately.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	/* LE part changed: queue the HCI LE Set Default PHY command; the
	 * pending cmd is completed from set_default_phy_complete().
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4126 
/* MGMT_OP_SET_BLOCKED_KEYS handler.
 *
 * Replaces hdev->blocked_keys with the keys supplied in the command.
 * The reply carries an MGMT status code, not a negative errno.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	/* Holds an MGMT_STATUS_* value used directly as the reply status. */
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Largest key_count whose total payload still fits in a u16 length. */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the advertised key count exactly. */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The new list fully replaces the previous one. */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			/* Keys added before the failure stay on the list;
			 * the reply reports the partial result as an error.
			 */
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
4175 
/* MGMT_OP_SET_WIDEBAND_SPEECH handler.
 *
 * Toggles the HCI_WIDEBAND_SPEECH_ENABLED flag.  The setting can only
 * be flipped while the controller is powered off; requesting a change
 * on a powered controller is rejected.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool enable, toggled = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The quirk advertises driver support for wideband speech. */
	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only 0x00 (off) and 0x01 (on) are valid parameter values. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	enable = !!cp->val;

	hci_dev_lock(hdev);

	/* Changing the setting while powered is not allowed. */
	if (hdev_is_powered(hdev) &&
	    enable != hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto done;
	}

	if (enable)
		toggled = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		toggled = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);

	/* Broadcast New Settings only when the flag actually changed. */
	if (err >= 0 && toggled)
		err = new_settings(hdev, sk);

done:
	hci_dev_unlock(hdev);
	return err;
}
4224 
/* MGMT_OP_READ_CONTROLLER_CAP handler.
 *
 * Builds a capability response as a sequence of EIR-style TLV entries:
 * security flags, maximum encryption key sizes and, when available,
 * the LE TX power range.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* Sized for the response header plus all TLV entries added below;
	 * adding new capability entries requires growing this buffer.
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		/* min/max are single-octet values copied byte-wise. */
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4291 
/* Experimental-feature UUIDs.  Each table stores the UUID in reversed
 * (little-endian) byte order relative to the canonical string form
 * quoted above it.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4335 
/* MGMT_OP_READ_EXP_FEATURES_INFO handler.
 *
 * Reports the experimental features applicable to @hdev (or to the
 * non-controller index when @hdev is NULL).  For each feature, BIT(0)
 * of the flags word means "currently enabled".
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* The debug feature exists only on the non-controller index. */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* BIT(1) indicates toggling this feature also changes the
		 * supported settings (see set_rpa_resolution_func()).
		 */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Quality report is available via an AOSP extension or a driver
	 * callback.
	 */
	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* Each feature entry is 20 bytes: a 16-byte UUID + 32-bit flags. */
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}
4440 
/* Emit an Experimental Feature Changed event for the LL privacy (RPA
 * resolution) feature and keep hdev->conn_flags in sync: the device
 * privacy connection flag is only offered while the feature is on.
 * BIT(1) in the event flags signals that the supported settings changed.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = BIT(1);

	if (enabled)
		flags |= BIT(0);

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	/* NOTE(review): not atomic with other conn_flags updates — the
	 * original carried the same open question; confirm locking.
	 */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
4461 
/* Broadcast an Experimental Feature Changed event for @uuid to all
 * sockets that opted in via HCI_MGMT_EXP_FEATURE_EVENTS, except @skip.
 * BIT(0) of the event flags mirrors the new enabled state.
 */
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = enabled ? BIT(0) : 0;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
4475 
/* Build one entry of the exp_features dispatch table below. */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4481 
4482 /* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	/* The reply always carries the all-zero UUID with no flags set. */
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* On the non-controller index, the zero key turns off debugging. */
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* On a controller index, disable LL privacy — but only while the
	 * controller is powered off.
	 */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	/* Subscribe the socket to future experimental-feature events. */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4518 
4519 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Toggle the Bluetooth debug experimental feature.  Only valid on the
 * non-controller index with a single boolean parameter octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	/* changed is true only if the new value differs from the old. */
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here (checked above), so the event is broadcast on
	 * the non-controller index.
	 */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
4564 #endif
4565 
/* Toggle the experimental MGMT mesh feature on a controller.  Disabling
 * it also clears the active HCI_MESH flag.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		/* Turning the experiment off also stops active mesh mode. */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}
4616 
/* Toggle the LL privacy (RPA resolution) experimental feature.  Only
 * allowed while the controller is powered off; the event flags use
 * BIT(0) for "enabled" and BIT(1) for "supported settings changed".
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		/* Advertising is restarted from scratch once LL privacy is
		 * in effect.
		 */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4681 
/* Toggle the controller quality-report experimental feature, preferring
 * a driver-provided callback over the AOSP vendor command.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Serialize with HCI request processing while toggling the report
	 * (the driver callback / AOSP command may issue HCI traffic).
	 */
	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* A driver callback takes precedence over the AOSP path. */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only update the flag after the controller accepted it. */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4755 
/* Toggle the offload-codecs experimental feature.  Requires a driver
 * that provides the get_data_path_id hook; the state lives entirely in
 * the HCI_OFFLOAD_CODECS_ENABLED device flag.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, toggled;
	int err;

	/* A valid controller index is mandatory. */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* The payload is exactly one parameter octet. */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only 0x00 (off) and 0x01 (on) are accepted. */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Codec offload needs the driver's data-path-id hook. */
	if (!hdev->get_data_path_id)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);

	enable = !!cp->param[0];
	toggled = (enable != hci_dev_test_flag(hdev,
					       HCI_OFFLOAD_CODECS_ENABLED));

	if (toggled) {
		if (enable)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    enable, toggled);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (toggled)
		exp_feature_changed(hdev, offload_codecs_uuid, enable, sk);

	return err;
}
4813 
/* Toggle the LE simultaneous central/peripheral roles experimental
 * feature, tracked via the HCI_LE_SIMULTANEOUS_ROLES device flag.
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	/* The controller must support the simultaneous LE states. */
	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4871 
4872 #ifdef CONFIG_BT_LE
/* Toggle ISO socket support globally by registering/unregistering the
 * ISO protocol.  Only valid on the non-controller index.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = cp->param[0] ? true : false;
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	/* Only report a change when init/exit actually succeeded; err is
	 * reused below for the command reply.
	 */
	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here (checked above). */
	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
4921 #endif
4922 
/* Dispatch table mapping an experimental-feature UUID to its handler;
 * scanned linearly by set_exp_feature() and terminated by a NULL entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4944 
/* MGMT_OP_SET_EXP_FEATURE handler: route the request to the handler
 * registered for the command's UUID, or report Not Supported when no
 * entry matches.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	const struct mgmt_exp_feature *feature;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (feature = exp_features; feature->uuid; feature++) {
		if (!memcmp(cp->uuid, feature->uuid, 16))
			return feature->set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4962 
get_params_flags(struct hci_dev * hdev,struct hci_conn_params * params)4963 static u32 get_params_flags(struct hci_dev *hdev,
4964 			    struct hci_conn_params *params)
4965 {
4966 	u32 flags = hdev->conn_flags;
4967 
4968 	/* Devices using RPAs can only be programmed in the acceptlist if
4969 	 * LL Privacy has been enable otherwise they cannot mark
4970 	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
4971 	 */
4972 	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
4973 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
4974 		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
4975 
4976 	return flags;
4977 }
4978 
/* MGMT_OP_GET_DEVICE_FLAGS handler.
 *
 * Looks the address up in the BR/EDR accept list or the LE connection
 * parameters and returns the supported and current device flags.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	/* Assume failure; overwritten once a matching entry is found. */
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		/* LE entries may further restrict the supported flags. */
		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
5030 
/* Emit MGMT_EV_DEVICE_FLAGS_CHANGED for the given device, skipping the
 * socket that issued the change.
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev = {
		.addr.type = bdaddr_type,
		.supported_flags = cpu_to_le32(supported_flags),
		.current_flags = cpu_to_le32(current_flags),
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
5044 
/* MGMT_OP_SET_DEVICE_FLAGS: update the connection flags of one device,
 * rejecting any flag outside the supported set.  Emits
 * MGMT_EV_DEVICE_FLAGS_CHANGED on success.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): conn_flags is read before hci_dev_lock() is taken;
	 * this assumes it cannot change concurrently - confirm.
	 */
	supported_flags = hdev->conn_flags;

	/* Reject any requested flag outside the supported set. */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* Re-check against the per-device supported set, which may be
	 * narrower than the global one (see get_params_flags()).
	 */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5121 
/* Notify mgmt listeners, except the requesting socket @sk, that an
 * advertisement monitor with @handle was registered.
 */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
5131 
/* Send MGMT_EV_ADV_MONITOR_REMOVED for @handle.  When the removal of a
 * specific monitor is pending, the requesting socket is skipped since it
 * receives a command reply instead of the event.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* A zero handle means "remove all monitors"; only skip the
		 * owner socket for a specific-handle removal.
		 */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5151 
/* MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported/enabled monitor
 * features and the handles of all registered monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	/* NOTE(review): assumes the IDR never holds more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES entries - confirm where monitor
	 * registration enforces that limit.
	 */
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Snapshot the handles under the lock; the reply buffer is built
	 * afterwards so the allocation happens without the lock held.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5200 
/* Completion callback for the Add Adv Patterns Monitor commands: on
 * success mark the monitor registered, bump the counter, refresh passive
 * scanning, and reply to the requester with the assigned handle.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5228 
mgmt_add_adv_patterns_monitor_sync(struct hci_dev * hdev,void * data)5229 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5230 {
5231 	struct mgmt_pending_cmd *cmd = data;
5232 	struct adv_monitor *monitor = cmd->user_data;
5233 
5234 	return hci_add_adv_monitor(hdev, monitor);
5235 }
5236 
/* Common tail for the Add Adv Patterns Monitor commands.
 *
 * Takes ownership of monitor @m: on any error path it is released via
 * hci_free_adv_monitor(); on success it stays attached to the pending
 * command until mgmt_add_adv_patterns_monitor_complete() runs.
 * A non-zero @status from the caller's parsing step is reported as-is.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	/* Only one monitor (or LE state) operation may be in flight. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5284 
parse_adv_monitor_rssi(struct adv_monitor * m,struct mgmt_adv_rssi_thresholds * rssi)5285 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5286 				   struct mgmt_adv_rssi_thresholds *rssi)
5287 {
5288 	if (rssi) {
5289 		m->rssi.low_threshold = rssi->low_threshold;
5290 		m->rssi.low_threshold_timeout =
5291 		    __le16_to_cpu(rssi->low_threshold_timeout);
5292 		m->rssi.high_threshold = rssi->high_threshold;
5293 		m->rssi.high_threshold_timeout =
5294 		    __le16_to_cpu(rssi->high_threshold_timeout);
5295 		m->rssi.sampling_period = rssi->sampling_period;
5296 	} else {
5297 		/* Default values. These numbers are the least constricting
5298 		 * parameters for MSFT API to work, so it behaves as if there
5299 		 * are no rssi parameter to consider. May need to be changed
5300 		 * if other API are to be supported.
5301 		 */
5302 		m->rssi.low_threshold = -127;
5303 		m->rssi.low_threshold_timeout = 60;
5304 		m->rssi.high_threshold = -127;
5305 		m->rssi.high_threshold_timeout = 0;
5306 		m->rssi.sampling_period = 0;
5307 	}
5308 }
5309 
/* Copy @pattern_count advertising patterns from the userspace request
 * into monitor @m.
 *
 * Returns an MGMT_STATUS_* code.  On failure, patterns already linked
 * into m->patterns stay there and are released by the caller through
 * hci_free_adv_monitor().
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	u8 offset = 0, length = 0;
	struct adv_pattern *p = NULL;
	int i;

	for (i = 0; i < pattern_count; i++) {
		offset = patterns[i].offset;
		length = patterns[i].length;

		/* Bound length by the fixed-size value[] field of
		 * mgmt_adv_pattern as well: HCI_MAX_EXT_AD_LENGTH is larger
		 * than that field, so without this check the memcpy() below
		 * could read past the request buffer.
		 */
		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
		    length > HCI_MAX_EXT_AD_LENGTH ||
		    (offset + length) > HCI_MAX_EXT_AD_LENGTH ||
		    length > sizeof(patterns[i].value))
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = patterns[i].ad_type;
		p->offset = offset;
		p->length = length;
		memcpy(p->value, patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
5340 
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR: register a pattern monitor with
 * default RSSI thresholds.  Ownership of the allocated monitor is handed
 * to __add_adv_patterns_monitor(), which also handles m == NULL.
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed header. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* The payload must match pattern_count exactly. */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* NULL selects the permissive default RSSI parameters. */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5377 
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI: like add_adv_patterns_monitor()
 * but with caller-supplied RSSI thresholds.  Ownership of the allocated
 * monitor is handed to __add_adv_patterns_monitor().
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed header. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* The payload must match pattern_count exactly. */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5414 
/* Completion callback for MGMT_OP_REMOVE_ADV_MONITOR: refresh passive
 * scanning on success and reply to the requester with the handle it asked
 * to remove.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Echo back the requested handle (already little-endian). */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5437 
mgmt_remove_adv_monitor_sync(struct hci_dev * hdev,void * data)5438 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5439 {
5440 	struct mgmt_pending_cmd *cmd = data;
5441 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5442 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5443 
5444 	if (!handle)
5445 		return hci_remove_all_adv_monitor(hdev);
5446 
5447 	return hci_remove_single_adv_monitor(hdev, handle);
5448 }
5449 
/* MGMT_OP_REMOVE_ADV_MONITOR: queue removal of one or all advertisement
 * monitors; only one monitor (or LE state) operation may be pending.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		/* The completion callback will not run; drop the pending
		 * command here and report the failure synchronously.
		 */
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5495 
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_DATA: translate the HCI
 * reply skb into a mgmt response.  The skb may be NULL or an ERR_PTR when
 * the synchronous HCI request itself failed.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Even when err is 0, the skb itself can carry a failure. */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: only the P-192 hash and randomizer. */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Truncate the reply so the 256-bit fields are omitted. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	/* Only free a real skb, not a NULL or ERR_PTR placeholder. */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5562 
read_local_oob_data_sync(struct hci_dev * hdev,void * data)5563 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5564 {
5565 	struct mgmt_pending_cmd *cmd = data;
5566 
5567 	if (bredr_sc_enabled(hdev))
5568 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5569 	else
5570 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5571 
5572 	if (IS_ERR(cmd->skb))
5573 		return PTR_ERR(cmd->skb);
5574 	else
5575 		return 0;
5576 }
5577 
/* MGMT_OP_READ_LOCAL_OOB_DATA: queue a synchronous HCI read of the local
 * OOB data.  Requires a powered, SSP-capable controller.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	/* Covers both allocation failure and a queueing error; the
	 * completion callback will not run, so free the command here.
	 */
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5619 
/* MGMT_OP_ADD_REMOTE_OOB_DATA: store remote OOB pairing data.  The
 * command has two wire formats, distinguished by length: the legacy
 * P-192-only form and the extended form carrying both P-192 and P-256
 * values.  Zero-valued key/randomizer pairs disable the corresponding
 * OOB data set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 hash/randomizer only, BR/EDR only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: P-192 and P-256 hash/randomizer pairs. */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5727 
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA: delete stored remote OOB data for one
 * BR/EDR device, or for all devices when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Remote OOB data is only tracked for BR/EDR addresses. */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* The wildcard address wipes the whole store. */
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
	} else if (hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type) < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
	} else {
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
5764 
/* Complete whichever start-discovery variant is pending, reporting the
 * translated HCI @status to its requester.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	static const u16 start_ops[] = {
		MGMT_OP_START_DISCOVERY,
		MGMT_OP_START_SERVICE_DISCOVERY,
		MGMT_OP_START_LIMITED_DISCOVERY,
	};
	struct mgmt_pending_cmd *cmd = NULL;
	size_t i;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* At most one of these variants can be pending at a time. */
	for (i = 0; i < ARRAY_SIZE(start_ops) && !cmd; i++)
		cmd = pending_find(start_ops[i], hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
5787 
discovery_type_is_valid(struct hci_dev * hdev,uint8_t type,uint8_t * mgmt_status)5788 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5789 				    uint8_t *mgmt_status)
5790 {
5791 	switch (type) {
5792 	case DISCOV_TYPE_LE:
5793 		*mgmt_status = mgmt_le_support(hdev);
5794 		if (*mgmt_status)
5795 			return false;
5796 		break;
5797 	case DISCOV_TYPE_INTERLEAVED:
5798 		*mgmt_status = mgmt_le_support(hdev);
5799 		if (*mgmt_status)
5800 			return false;
5801 		fallthrough;
5802 	case DISCOV_TYPE_BREDR:
5803 		*mgmt_status = mgmt_bredr_support(hdev);
5804 		if (*mgmt_status)
5805 			return false;
5806 		break;
5807 	default:
5808 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5809 		return false;
5810 	}
5811 
5812 	return true;
5813 }
5814 
/* hci_cmd_sync completion for the start-discovery variants: reply to the
 * requester and advance the discovery state machine.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the command was already completed elsewhere (e.g.
	 * via mgmt_start_discovery_complete()).
	 */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* The first byte of cmd->param is the discovery type. */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5833 
/* hci_cmd_sync callback: kick off discovery on the controller. */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5838 
/* Common implementation of MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op distinguishes them): validate
 * preconditions, configure hdev->discovery, and queue the synchronous
 * start request.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Refuse if discovery or a periodic inquiry is already running. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5909 
/* MGMT_OP_START_DISCOVERY: unrestricted discovery. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5916 
/* MGMT_OP_START_LIMITED_DISCOVERY: discovery limited to devices in
 * limited discoverable mode.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5924 
/* MGMT_OP_START_SERVICE_DISCOVERY: discovery with result filtering by
 * RSSI and an optional list of 128-bit service UUIDs appended to the
 * command.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Largest uuid_count that still fits in a u16-sized command. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Refuse if discovery or a periodic inquiry is already running. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command length must match the advertised UUID count. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6036 
/* Notify MGMT that a stop-discovery operation finished: complete any
 * pending MGMT_OP_STOP_DISCOVERY command with the translated status.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
6053 
/* hci_cmd_sync completion callback for stop_discovery(). */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Only respond if this command is still the queued one; it may
	 * already have been completed/removed elsewhere.
	 */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo back the first parameter byte (the discovery type). */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6070 
/* hci_cmd_sync work callback: perform the actual discovery stop. The
 * pending command in @data is handled by stop_discovery_complete().
 */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_stop_discovery_sync(hdev);

	return err;
}
6075 
/* Handle MGMT_OP_STOP_DISCOVERY: stop an active discovery of the type
 * requested by user space, queuing the actual stop on hci_sync.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery actually running. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6120 
/* Handle MGMT_OP_CONFIRM_NAME: user space tells us whether it already
 * knows the remote name of a discovered device, so the inquiry cache
 * can skip or schedule name resolution accordingly.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Name confirmation only makes sense while discovery is running. */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: no resolve needed for this entry. */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Queue the entry for name resolution. */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
6162 
/* Handle MGMT_OP_BLOCK_DEVICE: add the given address to the adapter's
 * reject list and emit a DEVICE_BLOCKED event on success.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_FAILED;
		goto done;
	}

	/* Notify other mgmt sockets (the requester gets the command
	 * reply below instead).
	 */
	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6198 
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove the given address from the
 * adapter's reject list and emit a DEVICE_UNBLOCKED event on success.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* Deleting a non-listed entry fails, which maps to
	 * INVALID_PARAMS (the device was never blocked).
	 */
	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6234 
/* hci_cmd_sync work callback: refresh the EIR data (queued right after
 * the devid_* fields change in set_device_id()).
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_eir_sync(hdev);

	return err;
}
6239 
/* Handle MGMT_OP_SET_DEVICE_ID: store the Device ID record fields on
 * the adapter and queue an EIR refresh to publish them.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	/* Only source values 0x0000-0x0002 are accepted. */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Best-effort EIR update; the mgmt command has already been
	 * answered above, so a queueing failure is not reported.
	 */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
6271 
/* Log the outcome of re-enabling instance advertising. */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6279 
/* hci_cmd_sync completion callback for MGMT_OP_SET_ADVERTISING:
 * synchronize the HCI_ADVERTISING flag with the controller state,
 * answer all pending SET_ADVERTISING commands and, if advertising was
 * just disabled, re-enable any configured advertising instances.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending SET_ADVERTISING command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's LE advertising state in the flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first one. */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6327 
/* hci_cmd_sync work for MGMT_OP_SET_ADVERTISING: apply the requested
 * advertising mode (0x00 off, 0x01 on, 0x02 on + connectable) on the
 * controller.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* 0x02 selects connectable advertising. */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6361 
/* Handle MGMT_OP_SET_ADVERTISING: enable/disable LE advertising
 * (val 0x00/0x01) or enable connectable advertising (val 0x02).
 * When no HCI traffic is needed, only the flags are toggled and a
 * settings response is sent; otherwise the change is queued on
 * hci_sync.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising changes are rejected while advertising is paused. */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast new settings when something changed. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject while another SET_ADVERTISING or SET_LE is in flight. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6446 
/* Handle MGMT_OP_SET_STATIC_ADDRESS: configure the LE static random
 * address. Only allowed while the adapter is powered off, and the
 * address itself must either be BDADDR_ANY (clear) or a valid static
 * random address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Changing the static address while powered is rejected. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		/* BDADDR_NONE is not a usable address. */
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6490 
/* Handle MGMT_OP_SET_SCAN_PARAMS: store new LE scan interval/window
 * and, if a background scan is currently running, restart it so the
 * new parameters take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);
	window = __le16_to_cpu(cp->window);

	/* Both values must be within 0x0004-0x4000 and the scan window
	 * may never exceed the scan interval.
	 */
	if (interval < 0x0004 || interval > 0x4000 ||
	    window < 0x0004 || window > 0x4000 ||
	    window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6539 
/* hci_cmd_sync completion callback for MGMT_OP_SET_FAST_CONNECTABLE:
 * update the flag to match what was written and answer the command.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		/* Only flip the flag once the controller accepted it. */
		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	/* cmd was created with mgmt_pending_new(), so free, not remove. */
	mgmt_pending_free(cmd);
}
6563 
write_fast_connectable_sync(struct hci_dev * hdev,void * data)6564 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6565 {
6566 	struct mgmt_pending_cmd *cmd = data;
6567 	struct mgmt_mode *cp = cmd->param;
6568 
6569 	return hci_write_fast_connectable_sync(hdev, cp->val);
6570 }
6571 
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: toggle BR/EDR fast-connectable
 * mode. Requires BR/EDR enabled and a controller of at least
 * Bluetooth 1.2; powered-off adapters only toggle the flag.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No-op if the setting already matches. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off only the flag is toggled; the controller is
	 * configured later on power-up.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	/* mgmt_pending_new(): not tracked via pending_find(); freed by
	 * the completion callback.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6627 
/* hci_cmd_sync completion callback for MGMT_OP_SET_BREDR. */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	/* cmd was allocated with mgmt_pending_new(), so free it here. */
	mgmt_pending_free(cmd);
}
6650 
set_bredr_sync(struct hci_dev * hdev,void * data)6651 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6652 {
6653 	int status;
6654 
6655 	status = hci_write_fast_connectable_sync(hdev, false);
6656 
6657 	if (!status)
6658 		status = hci_update_scan_sync(hdev);
6659 
6660 	/* Since only the advertising data flags will change, there
6661 	 * is no need to update the scan response data.
6662 	 */
6663 	if (!status)
6664 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6665 
6666 	return status;
6667 }
6668 
/* Handle MGMT_OP_SET_BREDR: enable/disable BR/EDR support on a
 * dual-mode controller. Disabling while powered is rejected, as is
 * re-enabling when the controller is committed to an LE-only identity
 * (static address or secure connections).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No-op if the setting already matches. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* While powered off only the flags are updated; the controller
	 * is configured on power-up.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			/* BR/EDR-only settings make no sense without it. */
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6768 
/* hci_cmd_sync completion callback for MGMT_OP_SET_SECURE_CONN:
 * synchronize the SC flags with the value that was actually written
 * (0x00 off, 0x01 on, 0x02 SC-only) and answer the command.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	/* cmd was allocated with mgmt_pending_new(), so free it here. */
	mgmt_pending_free(cmd);
}
6806 
set_secure_conn_sync(struct hci_dev * hdev,void * data)6807 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6808 {
6809 	struct mgmt_pending_cmd *cmd = data;
6810 	struct mgmt_mode *cp = cmd->param;
6811 	u8 val = !!cp->val;
6812 
6813 	/* Force write of val */
6814 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6815 
6816 	return hci_write_sc_support_sync(hdev, val);
6817 }
6818 
/* Handle MGMT_OP_SET_SECURE_CONN: configure Secure Connections
 * (0x00 off, 0x01 on, 0x02 SC-only). Only flag toggles are needed
 * when the adapter is off, not SC capable, or BR/EDR is disabled;
 * otherwise the controller write is queued on hci_sync.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* SC on BR/EDR requires SSP to be enabled first. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Without a powered, SC-capable, BR/EDR-enabled controller only
	 * the host flags need updating.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No-op if both the enabled and SC-only states already match. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6899 
/* Handle MGMT_OP_SET_DEBUG_KEYS: 0x00 discard debug keys, 0x01 keep
 * them, 0x02 keep them and also enable SSP debug mode on the
 * controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* 0x01 and 0x02 both mean "keep debug keys". */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 turns on active use of debug keys. */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Push the SSP debug mode change to the controller when it is
	 * powered and SSP is enabled.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6946 
/* Handle MGMT_OP_SET_PRIVACY: configure LE privacy (0x00 off, 0x01 on,
 * 0x02 limited privacy) together with the local IRK. Only allowed
 * while the adapter is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		/* Store the new local IRK and force a fresh RPA. */
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the IRK when privacy is switched off. */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7003 
irk_is_valid(struct mgmt_irk_info * irk)7004 static bool irk_is_valid(struct mgmt_irk_info *irk)
7005 {
7006 	switch (irk->addr.type) {
7007 	case BDADDR_LE_PUBLIC:
7008 		return true;
7009 
7010 	case BDADDR_LE_RANDOM:
7011 		/* Two most significant bits shall be set */
7012 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7013 			return false;
7014 		return true;
7015 	}
7016 
7017 	return false;
7018 }
7019 
/* Handler for MGMT_OP_LOAD_IRKS: replace the stored LE Identity
 * Resolving Keys with the list supplied by user space.
 *
 * The command is rejected as a whole on a bad count, a length mismatch
 * or any invalid entry; individual keys on the blocked-key list are
 * skipped with a warning instead. Providing IRKs implies user space
 * can resolve RPAs, so HCI_RPA_RESOLVING is set on success.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound keeps struct_size() below within u16 range */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must exactly match the declared entry count */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before touching the stored key list */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Full replacement: drop all previously stored IRKs first */
	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type = le_addr_type(irk->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (irk->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    addr_type, irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7095 
ltk_is_valid(struct mgmt_ltk_info * key)7096 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7097 {
7098 	if (key->initiator != 0x00 && key->initiator != 0x01)
7099 		return false;
7100 
7101 	switch (key->addr.type) {
7102 	case BDADDR_LE_PUBLIC:
7103 		return true;
7104 
7105 	case BDADDR_LE_RANDOM:
7106 		/* Two most significant bits shall be set */
7107 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7108 			return false;
7109 		return true;
7110 	}
7111 
7112 	return false;
7113 }
7114 
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: replace the stored LE Long
 * Term Keys with the list supplied by user space.
 *
 * The command is rejected as a whole on a bad count, a length mismatch
 * or any invalid entry. Individually skipped (without failing the
 * command) are: keys on the blocked-key list, debug keys, and entries
 * with an unknown key type.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound keeps struct_size() below within u16 range */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must exactly match the declared entry count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry before touching the stored key list */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Full replacement: drop all previously stored LTKs first */
	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;
		u8 addr_type = le_addr_type(key->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type to the internal SMP key type and
		 * authentication level. Debug keys fall through into the
		 * default case, so both debug and unknown types are skipped.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			fallthrough;
		default:
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (key->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    addr_type, type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7215 
/* Completion callback for the Get Connection Info command: send the
 * RSSI/TX-power values cached on the connection on success, or the
 * invalid-value markers on failure, then free the pending command.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;	/* set by get_conn_info_sync() */
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo the requested address back in the response */
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		/* Report explicit "invalid" markers rather than stale data */
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}
7244 
/* hci_cmd_sync work for Get Connection Info: re-read RSSI and, where
 * still needed, TX power for the connection named in cmd->param.
 * Returns 0, a negative error, or MGMT_STATUS_NOT_CONNECTED if the
 * connection went away; get_conn_info_complete() reports the result.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for get_conn_info_complete() */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7282 
/* Handler for MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an
 * established connection. Cached values are returned while they are
 * considered fresh; otherwise a sync request is queued to re-read them
 * from the controller and the reply is sent asynchronously from
 * get_conn_info_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Every reply echoes the requested address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		/* Queue the refresh; get_conn_info_complete() sends the reply
		 * and frees the pending command.
		 */
		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7373 
/* Completion callback for Get Clock Info: report the local clock and,
 * when a connection was resolved by get_clock_info_sync(), the piconet
 * clock and its accuracy. On error only the address is echoed back.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;	/* may be NULL */
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo the requested address back in the response */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
7404 
/* hci_cmd_sync work for Get Clock Info: read the local clock first,
 * then the piconet clock for the target connection's handle. Returns
 * MGMT_STATUS_NOT_CONNECTED if the connection no longer exists.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* All-zero parameters select the local clock (which = 0x00);
	 * the result of this read is not checked here.
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for get_clock_info_complete() */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7426 
/* Handler for MGMT_OP_GET_CLOCK_INFO: read local and (for a specific
 * connected BR/EDR address) piconet clock information. The HCI reads
 * run from get_clock_info_sync() and the reply is sent by
 * get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Every reply echoes the requested address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-wildcard address must refer to an established connection */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7490 
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)7491 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7492 {
7493 	struct hci_conn *conn;
7494 
7495 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7496 	if (!conn)
7497 		return false;
7498 
7499 	if (conn->dst_type != type)
7500 		return false;
7501 
7502 	if (conn->state != BT_CONNECTED)
7503 		return false;
7504 
7505 	return true;
7506 }
7507 
/* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy for an LE connection-parameters entry,
 * creating the entry if needed, and move it onto the matching pending
 * list (pend_le_conns / pend_le_reports). Returns 0 on success or -EIO
 * if the entry could not be created.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Creates the entry with defaults if it doesn't exist yet */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do when the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Take the entry off whichever pending list it is currently on
	 * before re-adding it based on the new policy.
	 */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7552 
/* Emit a Device Added event describing the new accept-list or
 * auto-connect entry to all mgmt sockets except @sk.
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev = {
		.addr.type = type,
		.action = action,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
7564 
/* hci_cmd_sync work for Add Device: re-program passive scanning so the
 * newly added entry takes effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7569 
/* Handler for MGMT_OP_ADD_DEVICE: accept incoming connections from a
 * BR/EDR device (action 0x01 only) or configure auto-connection /
 * passive-scan reporting for an LE identity address.
 *
 * Actions: 0x00 = background scan report, 0x01 = allow incoming/direct
 * connect, 0x02 = auto-connect always. Emits Device Added and Device
 * Flags Changed events on success.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* A valid, non-wildcard address is required */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	/* Re-program passive scanning so the new entry takes effect */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7671 
/* Emit a Device Removed event for the given address to all mgmt
 * sockets except @sk.
 */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev = {
		.addr.type = type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
7682 
/* hci_cmd_sync work for Remove Device: re-program passive scanning so
 * the removal takes effect.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7687 
/* Handler for MGMT_OP_REMOVE_DEVICE: undo a previous Add Device.
 *
 * With a specific address this removes the BR/EDR accept-list entry or
 * the LE connection parameters for that identity address. With
 * BDADDR_ANY (address type must be 0) it flushes the entire accept
 * list and all non-disabled LE connection parameters. A Device Removed
 * event is emitted for each removed entry.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device (disabled or
		 * explicit-connect only) cannot be removed this way.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries that an ongoing explicit connect
			 * attempt still relies on.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	/* Re-program passive scanning to match the updated lists */
	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7811 
/* Handler for MGMT_OP_LOAD_CONN_PARAM: load a list of preferred LE
 * connection parameters supplied by user space.
 *
 * Previously stored parameters with auto-connect disabled are cleared
 * first. Entries with an invalid address type or out-of-range values
 * are skipped individually rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound keeps struct_size() below within u16 range */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must exactly match the declared entry count */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE address types carry connection parameters */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Looks up the existing entry or creates a new one */
		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7896 
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: record whether device
 * configuration is handled by an external entity. Only valid while
 * powered off and when the controller declares
 * HCI_QUIRK_EXTERNAL_CONFIG. A change can move the controller between
 * the configured and unconfigured index lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The unconfigured flag no longer matches the actual configuration
	 * state, so the index has to be re-announced on the other list.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Became configured: run the power-on setup path */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose it as a raw device */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7952 
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: stage a public address for a
 * controller whose driver provides a set_bdaddr callback. Only valid
 * while powered off. If the device thereby becomes fully configured it
 * is re-announced as a configured index and powered on for setup.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* The wildcard address cannot be used as a public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Requires driver support for programming the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* Device is now fully configured: move it to the
		 * configured index list and power it on for setup.
		 */
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8004 
/* Completion callback for the Read Local OOB Data HCI command queued by
 * read_local_ssp_oob_req().  Translates the HCI reply into an EIR-coded
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA response and, on success, broadcasts
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED to other sockets that opted in.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if the command is no longer the pending one. */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* Derive an overall status from the HCI reply skb. */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status already went through mgmt_status()
		 * above, so this second mapping looks redundant — confirm
		 * before changing.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy SSP: the reply carries only the C192/R192 pair. */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-dev (2+3) + hash (2+16) + rand (2+16) */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: extended reply with both pairs. */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only mode: omit the P-192 values. */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	/* Build the EIR payload: class of device first, then whichever
	 * hash/randomizer pairs are available.
	 */
	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* The requester implicitly subscribes to future OOB updates. */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8127 
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)8128 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8129 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8130 {
8131 	struct mgmt_pending_cmd *cmd;
8132 	int err;
8133 
8134 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8135 			       cp, sizeof(*cp));
8136 	if (!cmd)
8137 		return -ENOMEM;
8138 
8139 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8140 				 read_local_oob_ext_data_complete);
8141 
8142 	if (err < 0) {
8143 		mgmt_pending_remove(cmd);
8144 		return err;
8145 	}
8146 
8147 	return 0;
8148 }
8149 
/* Read Local OOB Extended Data (MGMT_OP_READ_LOCAL_OOB_EXT_DATA)
 *
 * For BR/EDR with SSP enabled, the pairing hash/randomizer must be read
 * from the controller, so the reply is deferred via
 * read_local_ssp_oob_req().  For LE, the OOB payload (address, role,
 * optional SC confirm/random values, flags) is assembled here and
 * returned immediately.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-compute the status and the worst-case EIR size so the reply
	 * buffer can be allocated up front.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* class-of-dev field only (2+3 bytes) */
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* addr (9) + role (3) + confirm (18) +
				 * random (18) + flags (3)
				 */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	/* OOB pairing data requires SSP support on the controller. */
	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Hash/randomizer must come from the controller;
			 * the mgmt reply is sent by the completion handler
			 * instead of here.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will use: the static address
		 * (type 0x01) when forced, when no public address exists,
		 * or when BR/EDR is disabled and a static address is set;
		 * otherwise the public address (type 0x00).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role byte: 0x02 while advertising, 0x01 otherwise. */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* The requester implicitly subscribes to OOB data updates. */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8310 
get_supported_adv_flags(struct hci_dev * hdev)8311 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8312 {
8313 	u32 flags = 0;
8314 
8315 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8316 	flags |= MGMT_ADV_FLAG_DISCOV;
8317 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8318 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8319 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8320 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8321 	flags |= MGMT_ADV_PARAM_DURATION;
8322 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8323 	flags |= MGMT_ADV_PARAM_INTERVALS;
8324 	flags |= MGMT_ADV_PARAM_TX_POWER;
8325 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8326 
8327 	/* In extended adv TX_POWER returned from Set Adv Param
8328 	 * will be always valid.
8329 	 */
8330 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8331 		flags |= MGMT_ADV_FLAG_TX_POWER;
8332 
8333 	if (ext_adv_capable(hdev)) {
8334 		flags |= MGMT_ADV_FLAG_SEC_1M;
8335 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8336 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8337 
8338 		if (le_2m_capable(hdev))
8339 			flags |= MGMT_ADV_FLAG_SEC_2M;
8340 
8341 		if (le_coded_capable(hdev))
8342 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8343 	}
8344 
8345 	return flags;
8346 }
8347 
/* Read Advertising Features (MGMT_OP_READ_ADV_FEATURES)
 *
 * Reports the supported advertising flags, data length limits, maximum
 * instance count and the list of currently registered instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per registered instance number. */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		/* NOTE(review): the code compares against adv_instance_cnt
		 * while the comment above says le_num_of_adv_sets — confirm
		 * which bound is actually intended.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink the reported list. */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8402 
calculate_name_len(struct hci_dev * hdev)8403 static u8 calculate_name_len(struct hci_dev *hdev)
8404 {
8405 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8406 
8407 	return eir_append_local_name(hdev, buf, 0);
8408 }
8409 
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)8410 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8411 			   bool is_adv_data)
8412 {
8413 	u8 max_len = max_adv_len(hdev);
8414 
8415 	if (is_adv_data) {
8416 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8417 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8418 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8419 			max_len -= 3;
8420 
8421 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8422 			max_len -= 3;
8423 	} else {
8424 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8425 			max_len -= calculate_name_len(hdev);
8426 
8427 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8428 			max_len -= 4;
8429 	}
8430 
8431 	return max_len;
8432 }
8433 
flags_managed(u32 adv_flags)8434 static bool flags_managed(u32 adv_flags)
8435 {
8436 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8437 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8438 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8439 }
8440 
tx_power_managed(u32 adv_flags)8441 static bool tx_power_managed(u32 adv_flags)
8442 {
8443 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8444 }
8445 
name_managed(u32 adv_flags)8446 static bool name_managed(u32 adv_flags)
8447 {
8448 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8449 }
8450 
appearance_managed(u32 adv_flags)8451 static bool appearance_managed(u32 adv_flags)
8452 {
8453 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8454 }
8455 
/* Validate user-supplied advertising or scan-response data.
 *
 * @data is a sequence of length-prefixed AD structures (length byte,
 * type byte, payload).  Checks that the whole buffer fits within the
 * space left after kernel-managed fields, that every structure stays
 * inside the buffer, and that no structure duplicates a field the
 * kernel manages itself (Flags, TX power, local name, appearance) for
 * the given @adv_flags.
 *
 * Returns true if the data is acceptable, false otherwise.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.  Check this before reading
		 * the type byte below, so a bogus length byte at the very
		 * end of the buffer cannot trigger an out-of-bounds read
		 * of data[i + 1].
		 */
		if (i + cur_len >= len)
			return false;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;
	}

	return true;
}
8500 
requested_adv_flags_are_valid(struct hci_dev * hdev,u32 adv_flags)8501 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8502 {
8503 	u32 supported_flags, phy_flags;
8504 
8505 	/* The current implementation only supports a subset of the specified
8506 	 * flags. Also need to check mutual exclusiveness of sec flags.
8507 	 */
8508 	supported_flags = get_supported_adv_flags(hdev);
8509 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8510 	if (adv_flags & ~supported_flags ||
8511 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8512 		return false;
8513 
8514 	return true;
8515 }
8516 
adv_busy(struct hci_dev * hdev)8517 static bool adv_busy(struct hci_dev *hdev)
8518 {
8519 	return pending_find(MGMT_OP_SET_LE, hdev);
8520 }
8521 
/* Common cleanup after programming advertising instances.  On success,
 * pending instances become live; on error, every still-pending instance
 * is removed and userspace is told about the removal.
 */
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		/* NOTE(review): this local shadows the 'instance'
		 * parameter, which is otherwise unused in this function.
		 */
		u8 instance;

		if (!adv->pending)
			continue;

		if (!err) {
			/* Programming succeeded: instance is now live. */
			adv->pending = false;
			continue;
		}

		instance = adv->instance;

		/* Stop the rotation timer before removing the instance
		 * that is currently being advertised.
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);
}
8553 
add_advertising_complete(struct hci_dev * hdev,void * data,int err)8554 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8555 {
8556 	struct mgmt_pending_cmd *cmd = data;
8557 	struct mgmt_cp_add_advertising *cp = cmd->param;
8558 	struct mgmt_rp_add_advertising rp;
8559 
8560 	memset(&rp, 0, sizeof(rp));
8561 
8562 	rp.instance = cp->instance;
8563 
8564 	if (err)
8565 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8566 				mgmt_status(err));
8567 	else
8568 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8569 				  mgmt_status(err), &rp, sizeof(rp));
8570 
8571 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8572 
8573 	mgmt_pending_free(cmd);
8574 }
8575 
add_advertising_sync(struct hci_dev * hdev,void * data)8576 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8577 {
8578 	struct mgmt_pending_cmd *cmd = data;
8579 	struct mgmt_cp_add_advertising *cp = cmd->param;
8580 
8581 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8582 }
8583 
/* Add Advertising (MGMT_OP_ADD_ADVERTISING)
 *
 * Registers (or replaces) advertising instance cp->instance with the
 * supplied flags, adv data and scan response data, then schedules it
 * for transmission unless the controller state makes that unnecessary.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Valid instance numbers are 1..le_num_of_adv_sets. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Command length must match the advertised data lengths. */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Scan response data immediately follows adv data in cp->data. */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Record which instance the sync work should actually schedule. */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8718 
/* Completion handler for MGMT_OP_ADD_EXT_ADV_PARAMS: report the
 * negotiated TX power and remaining data space, or clean up the
 * instance on failure.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* NOTE(review): if the instance vanished, this jumps to unlock and
	 * frees the pending command without sending any response to the
	 * caller — confirm that is intended.
	 */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	/* NOTE(review): cmd was dereferenced above, so this NULL check is
	 * always true here.
	 */
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8769 
add_ext_adv_params_sync(struct hci_dev * hdev,void * data)8770 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8771 {
8772 	struct mgmt_pending_cmd *cmd = data;
8773 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8774 
8775 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8776 }
8777 
/* Add Extended Advertising Parameters (MGMT_OP_ADD_EXT_ADV_PARAMS)
 *
 * First half of the two-step extended advertising registration: creates
 * an instance with parameters only (no data).  MGMT_OP_ADD_EXT_ADV_DATA
 * supplies the data afterwards.  With extended advertising hardware the
 * parameters are programmed asynchronously; otherwise defaults are
 * reported immediately.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Valid instance numbers are 1..le_num_of_adv_sets. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Undo the instance created above. */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: no hardware negotiation needed,
		 * answer immediately with default TX power and limits.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8893 
add_ext_adv_data_complete(struct hci_dev * hdev,void * data,int err)8894 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8895 {
8896 	struct mgmt_pending_cmd *cmd = data;
8897 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8898 	struct mgmt_rp_add_advertising rp;
8899 
8900 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8901 
8902 	memset(&rp, 0, sizeof(rp));
8903 
8904 	rp.instance = cp->instance;
8905 
8906 	if (err)
8907 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8908 				mgmt_status(err));
8909 	else
8910 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8911 				  mgmt_status(err), &rp, sizeof(rp));
8912 
8913 	mgmt_pending_free(cmd);
8914 }
8915 
add_ext_adv_data_sync(struct hci_dev * hdev,void * data)8916 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8917 {
8918 	struct mgmt_pending_cmd *cmd = data;
8919 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8920 	int err;
8921 
8922 	if (ext_adv_capable(hdev)) {
8923 		err = hci_update_adv_data_sync(hdev, cp->instance);
8924 		if (err)
8925 			return err;
8926 
8927 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8928 		if (err)
8929 			return err;
8930 
8931 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8932 	}
8933 
8934 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8935 }
8936 
/* Handle MGMT_OP_ADD_EXT_ADV_DATA: install advertising and scan response
 * data for an instance that was previously created via Add Ext Adv Params.
 * cp->data holds adv_data_len bytes of advertising data immediately
 * followed by scan_rsp_len bytes of scan response data.  Replies with
 * mgmt_rp_add_ext_adv_data on success; on any failure after the instance
 * lookup the (still pending) instance is removed again.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by Add Ext Adv Params first */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	/* Refuse while another advertising operation is in flight */
	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data: adv data first, then the scan response that
	 * follows it in the same buffer.
	 */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9055 
remove_advertising_complete(struct hci_dev * hdev,void * data,int err)9056 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9057 					int err)
9058 {
9059 	struct mgmt_pending_cmd *cmd = data;
9060 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9061 	struct mgmt_rp_remove_advertising rp;
9062 
9063 	bt_dev_dbg(hdev, "err %d", err);
9064 
9065 	memset(&rp, 0, sizeof(rp));
9066 	rp.instance = cp->instance;
9067 
9068 	if (err)
9069 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9070 				mgmt_status(err));
9071 	else
9072 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9073 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9074 
9075 	mgmt_pending_free(cmd);
9076 }
9077 
remove_advertising_sync(struct hci_dev * hdev,void * data)9078 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9079 {
9080 	struct mgmt_pending_cmd *cmd = data;
9081 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9082 	int err;
9083 
9084 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9085 	if (err)
9086 		return err;
9087 
9088 	if (list_empty(&hdev->adv_instances))
9089 		err = hci_disable_advertising_sync(hdev);
9090 
9091 	return err;
9092 }
9093 
/* Handle MGMT_OP_REMOVE_ADVERTISING: queue removal of one advertising
 * instance, or of all instances when cp->instance is zero.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *pending;
	int ret;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing advertisement */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		ret = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while an LE enable/disable toggle is still pending */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		ret = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered, so nothing to remove */
	if (list_empty(&hdev->adv_instances)) {
		ret = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pending = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
				   data_len);
	if (!pending) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = hci_cmd_sync_queue(hdev, remove_advertising_sync, pending,
				 remove_advertising_complete);
	if (ret < 0)
		mgmt_pending_free(pending);

unlock:
	hci_dev_unlock(hdev);

	return ret;
}
9141 
/* Handle MGMT_OP_GET_ADV_SIZE_INFO: report the maximum advertising and
 * scan response payload sizes available for the given instance and flags.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 adv_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Advertising requires LE support */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	/* Instance numbers are 1-based and bounded by the controller */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	adv_flags = __le32_to_cpu(cp->flags);

	/* Only a subset of the defined flags is supported; reject the rest */
	if (adv_flags & ~get_supported_adv_flags(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, adv_flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, adv_flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
9177 
/* Dispatch table for MGMT commands, indexed by command opcode (opcode
 * 0x0000 is unused).  Each entry holds the handler function, the expected
 * parameter size, and optional HCI_MGMT_* flags (e.g. HCI_MGMT_VAR_LEN for
 * variable-length parameters, HCI_MGMT_NO_HDEV for commands that target no
 * specific controller).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9311 
mgmt_index_added(struct hci_dev * hdev)9312 void mgmt_index_added(struct hci_dev *hdev)
9313 {
9314 	struct mgmt_ev_ext_index ev;
9315 
9316 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9317 		return;
9318 
9319 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9320 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9321 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9322 		ev.type = 0x01;
9323 	} else {
9324 		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9325 				 HCI_MGMT_INDEX_EVENTS);
9326 		ev.type = 0x00;
9327 	}
9328 
9329 	ev.bus = hdev->bus;
9330 
9331 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9332 			 HCI_MGMT_EXT_INDEX_EVENTS);
9333 }
9334 
mgmt_index_removed(struct hci_dev * hdev)9335 void mgmt_index_removed(struct hci_dev *hdev)
9336 {
9337 	struct mgmt_ev_ext_index ev;
9338 	u8 status = MGMT_STATUS_INVALID_INDEX;
9339 
9340 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9341 		return;
9342 
9343 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9344 
9345 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9346 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9347 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9348 		ev.type = 0x01;
9349 	} else {
9350 		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9351 				 HCI_MGMT_INDEX_EVENTS);
9352 		ev.type = 0x00;
9353 	}
9354 
9355 	ev.bus = hdev->bus;
9356 
9357 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9358 			 HCI_MGMT_EXT_INDEX_EVENTS);
9359 
9360 	/* Cancel any remaining timed work */
9361 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
9362 		return;
9363 	cancel_delayed_work_sync(&hdev->discov_off);
9364 	cancel_delayed_work_sync(&hdev->service_cache);
9365 	cancel_delayed_work_sync(&hdev->rpa_expired);
9366 }
9367 
mgmt_power_on(struct hci_dev * hdev,int err)9368 void mgmt_power_on(struct hci_dev *hdev, int err)
9369 {
9370 	struct cmd_lookup match = { NULL, hdev };
9371 
9372 	bt_dev_dbg(hdev, "err %d", err);
9373 
9374 	hci_dev_lock(hdev);
9375 
9376 	if (!err) {
9377 		restart_le_actions(hdev);
9378 		hci_update_passive_scan(hdev);
9379 	}
9380 
9381 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9382 
9383 	new_settings(hdev, match.sk);
9384 
9385 	if (match.sk)
9386 		sock_put(match.sk);
9387 
9388 	hci_dev_unlock(hdev);
9389 }
9390 
__mgmt_power_off(struct hci_dev * hdev)9391 void __mgmt_power_off(struct hci_dev *hdev)
9392 {
9393 	struct cmd_lookup match = { NULL, hdev };
9394 	u8 status, zero_cod[] = { 0, 0, 0 };
9395 
9396 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9397 
9398 	/* If the power off is because of hdev unregistration let
9399 	 * use the appropriate INVALID_INDEX status. Otherwise use
9400 	 * NOT_POWERED. We cover both scenarios here since later in
9401 	 * mgmt_index_removed() any hci_conn callbacks will have already
9402 	 * been triggered, potentially causing misleading DISCONNECTED
9403 	 * status responses.
9404 	 */
9405 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9406 		status = MGMT_STATUS_INVALID_INDEX;
9407 	else
9408 		status = MGMT_STATUS_NOT_POWERED;
9409 
9410 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9411 
9412 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9413 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9414 				   zero_cod, sizeof(zero_cod),
9415 				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9416 		ext_info_changed(hdev, NULL);
9417 	}
9418 
9419 	new_settings(hdev, match.sk);
9420 
9421 	if (match.sk)
9422 		sock_put(match.sk);
9423 }
9424 
mgmt_set_powered_failed(struct hci_dev * hdev,int err)9425 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9426 {
9427 	struct mgmt_pending_cmd *cmd;
9428 	u8 status;
9429 
9430 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9431 	if (!cmd)
9432 		return;
9433 
9434 	if (err == -ERFKILL)
9435 		status = MGMT_STATUS_RFKILLED;
9436 	else
9437 		status = MGMT_STATUS_FAILED;
9438 
9439 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9440 
9441 	mgmt_pending_remove(cmd);
9442 }
9443 
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key;
 * store_hint tells userspace whether the key should be persisted.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev = {
		.store_hint = persistent,
		.key.type = key->type,
		.key.pin_len = key->pin_len,
	};

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
9460 
mgmt_ltk_type(struct smp_ltk * ltk)9461 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9462 {
9463 	switch (ltk->type) {
9464 	case SMP_LTK:
9465 	case SMP_LTK_RESPONDER:
9466 		if (ltk->authenticated)
9467 			return MGMT_LTK_AUTHENTICATED;
9468 		return MGMT_LTK_UNAUTHENTICATED;
9469 	case SMP_LTK_P256:
9470 		if (ltk->authenticated)
9471 			return MGMT_LTK_P256_AUTH;
9472 		return MGMT_LTK_P256_UNAUTH;
9473 	case SMP_LTK_P256_DEBUG:
9474 		return MGMT_LTK_P256_DEBUG;
9475 	}
9476 
9477 	return MGMT_LTK_UNAUTHENTICATED;
9478 }
9479 
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a newly distributed LE LTK. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9522 
/* Emit MGMT_EV_NEW_IRK for a newly received identity resolving key,
 * including the RPA it currently resolves.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev = { .store_hint = persistent };

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
9538 
/* Emit MGMT_EV_NEW_CSRK for a newly received signature resolving key. */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9568 
/* Emit MGMT_EV_NEW_CONN_PARAM so userspace can persist connection
 * parameters suggested by the remote; only identity addresses qualify.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	/* Parameters for non-identity (changing) addresses are useless */
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.timeout = cpu_to_le16(timeout);
	ev.latency = cpu_to_le16(latency);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.min_interval = cpu_to_le16(min_interval);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9589 
/* Emit MGMT_EV_DEVICE_CONNECTED for a newly established connection,
 * attaching either the LE advertising data or (for BR/EDR) the remote
 * name and class of device as EIR fields.  Emitted at most once per
 * connection, guarded by HCI_CONN_MGMT_CONNECTED.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* allocate buff for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	/* Fix: skb_put() below would dereference a NULL skb if the
	 * allocation failed. Clear the connected flag again so the event
	 * can still be generated by a later attempt.
	 */
	if (!skb) {
		clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
		return;
	}

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}
9639 
disconnect_rsp(struct mgmt_pending_cmd * cmd,void * data)9640 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9641 {
9642 	struct sock **sk = data;
9643 
9644 	cmd->cmd_complete(cmd, 0);
9645 
9646 	*sk = cmd->sk;
9647 	sock_hold(*sk);
9648 
9649 	mgmt_pending_remove(cmd);
9650 }
9651 
unpair_device_rsp(struct mgmt_pending_cmd * cmd,void * data)9652 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9653 {
9654 	struct hci_dev *hdev = data;
9655 	struct mgmt_cp_unpair_device *cp = cmd->param;
9656 
9657 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9658 
9659 	cmd->cmd_complete(cmd, 0);
9660 	mgmt_pending_remove(cmd);
9661 }
9662 
mgmt_powering_down(struct hci_dev * hdev)9663 bool mgmt_powering_down(struct hci_dev *hdev)
9664 {
9665 	struct mgmt_pending_cmd *cmd;
9666 	struct mgmt_mode *cp;
9667 
9668 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9669 	if (!cmd)
9670 		return false;
9671 
9672 	cp = cmd->param;
9673 	if (!cp->val)
9674 		return true;
9675 
9676 	return false;
9677 }
9678 
/* Emit MGMT_EV_DEVICE_DISCONNECTED for an ACL/LE disconnection, complete
 * any pending Disconnect commands (skipping their socket for the event)
 * and flush pending Unpair Device commands afterwards.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* No Device Connected event was sent, so send no disconnect either */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	/* Report disconnects due to suspend */
	ev.reason = hdev->suspended ? MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND :
				      reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9710 
/* A requested disconnection failed: flush pending Unpair Device commands
 * and, if a pending Disconnect command matches this address, complete it
 * with the mapped failure status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command that targeted this exact address */
	if (bacmp(bdaddr, &cp->addr.bdaddr) || cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9736 
/* Emit MGMT_EV_CONNECT_FAILED with the HCI status mapped to MGMT. */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev = {
		.status = mgmt_status(status),
	};

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9748 
/* Ask userspace for a PIN code; secure indicates a 16-digit PIN need. */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev = {
		.addr.type = BDADDR_BREDR,
		.secure = secure,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
9759 
/* Complete a pending PIN Code Reply command with the controller status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9772 
/* Complete a pending PIN Code Negative Reply command with the
 * controller status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9785 
/* Ask userspace to confirm a pairing numeric value (SSP/LE). */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev = {
		.confirm_hint = confirm_hint,
		.value = cpu_to_le32(value),
	};

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9802 
/* Ask userspace to enter a passkey for the given remote device. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9816 
/* Complete the pending user-pairing response command identified by
 * opcode; returns -ENOENT when no such command is outstanding.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
9832 
/* Completion hook for User Confirm Reply. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9839 
/* Completion hook for User Confirm Negative Reply. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9847 
/* Completion hook for User Passkey Reply. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9854 
/* User Passkey negative reply completed: finish the matching
 * MGMT_OP_USER_PASSKEY_NEG_REPLY command.
 */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9862 
/* Emit a Passkey Notify event so userspace can display @passkey for the
 * remote device. @entered reports how many digits have been keyed in.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.entered = entered;
	ev.passkey = __cpu_to_le32(passkey);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
9878 
/* Report an authentication failure on @conn to userspace and, if a
 * pairing command was waiting on this connection, complete and remove
 * it. The command's own socket is skipped for the broadcast event.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);
	u8 status = mgmt_status(hci_status);
	struct mgmt_ev_auth_failed ev;

	ev.status = status;
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	bacpy(&ev.addr.bdaddr, &conn->dst);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);
}
9899 
/* Authentication-enable write finished in the controller. On failure,
 * fail all pending Set Link Security commands; otherwise sync the
 * HCI_LINK_SECURITY flag with the controller's HCI_AUTH state and emit
 * New Settings if the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller state into the mgmt flag; "changed" is
	 * true only if the flag's value flipped.
	 */
	changed = test_bit(HCI_AUTH, &hdev->flags) ?
		  !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY) :
		  hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9926 
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)9927 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9928 {
9929 	struct cmd_lookup *match = data;
9930 
9931 	if (match->sk == NULL) {
9932 		match->sk = cmd->sk;
9933 		sock_hold(match->sk);
9934 	}
9935 }
9936 
/* Class of Device update finished. Collect (via sk_lookup) the socket
 * of the first pending class-related command, broadcast the new class
 * on success, then drop the reference sk_lookup took.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (status == 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9955 
/* Local name update finished in the controller. On success, emit a
 * Local Name Changed event (skipping the requesting socket, if any)
 * and refresh the extended info. Kernel-initiated updates that are
 * part of powering on send no events at all.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	/* NOTE(review): assumes @name points at a buffer of at least
	 * HCI_MAX_NAME_LENGTH bytes - confirm against all callers.
	 */
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No userspace Set Local Name pending, so the update was
		 * kernel-initiated: store the name on the hdev ourselves.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9983 
has_uuid(u8 * uuid,u16 uuid_count,u8 (* uuids)[16])9984 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9985 {
9986 	int i;
9987 
9988 	for (i = 0; i < uuid_count; i++) {
9989 		if (!memcmp(uuid, uuids[i], 16))
9990 			return true;
9991 	}
9992 
9993 	return false;
9994 }
9995 
/* Scan EIR/advertising data for any service UUID present in the @uuids
 * filter list. 16-bit and 32-bit UUIDs are expanded to 128-bit form on
 * top of the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length of type byte + data */
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		/* Stop on a truncated field instead of reading past the
		 * end of the buffer.
		 */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {	/* EIR field type */
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUIDs (little endian) start at eir[2];
			 * they replace bytes 12-13 of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs replace bytes 12-15 of the base UUID. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are compared as-is. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10050 
restart_le_scan(struct hci_dev * hdev)10051 static void restart_le_scan(struct hci_dev *hdev)
10052 {
10053 	/* If controller is not scanning we are done. */
10054 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
10055 		return;
10056 
10057 	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
10058 		       hdev->discovery.scan_start +
10059 		       hdev->discovery.scan_duration))
10060 		return;
10061 
10062 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
10063 			   DISCOV_LE_RESTART_DELAY);
10064 }
10065 
is_filter_match(struct hci_dev * hdev,s8 rssi,u8 * eir,u16 eir_len,u8 * scan_rsp,u8 scan_rsp_len)10066 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10067 			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10068 {
10069 	/* If a RSSI threshold has been specified, and
10070 	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10071 	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
10072 	 * is set, let it through for further processing, as we might need to
10073 	 * restart the scan.
10074 	 *
10075 	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
10076 	 * the results are also dropped.
10077 	 */
10078 	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10079 	    (rssi == HCI_RSSI_INVALID ||
10080 	    (rssi < hdev->discovery.rssi &&
10081 	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
10082 		return  false;
10083 
10084 	if (hdev->discovery.uuid_count != 0) {
10085 		/* If a list of UUIDs is provided in filter, results with no
10086 		 * matching UUID should be dropped.
10087 		 */
10088 		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10089 				   hdev->discovery.uuids) &&
10090 		    !eir_has_uuids(scan_rsp, scan_rsp_len,
10091 				   hdev->discovery.uuid_count,
10092 				   hdev->discovery.uuids))
10093 			return false;
10094 	}
10095 
10096 	/* If duplicate filtering does not report RSSI changes, then restart
10097 	 * scanning to ensure updated result with updated RSSI values.
10098 	 */
10099 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
10100 		restart_le_scan(hdev);
10101 
10102 		/* Validate RSSI value against the RSSI threshold once more. */
10103 		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10104 		    rssi < hdev->discovery.rssi)
10105 			return false;
10106 	}
10107 
10108 	return true;
10109 }
10110 
/* Notify userspace that a device previously matched by advertisement
 * monitor @handle is no longer being seen.
 */
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev = {
		.monitor_handle = cpu_to_le16(handle),
		.addr.type = addr_type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}
10123 
/* Send an ADV_MONITOR_DEVICE_FOUND event built from an existing
 * DEVICE_FOUND event skb. The new event is the monitor handle followed
 * by a copy of @skb's payload; @skb itself is left untouched.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t extra;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* Room needed beyond the DEVICE_FOUND payload is exactly the
	 * size difference between the two event headers.
	 */
	extra = sizeof(struct mgmt_ev_adv_monitor_device_found) -
		sizeof(struct mgmt_ev_device_found);

	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    extra + skb->len);
	if (!advmon_skb)
		return;

	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10153 
/* Route a DEVICE_FOUND event skb to the right event(s) depending on why
 * the report was received. Consumes @skb on every path: it is either
 * forwarded via mgmt_event_skb() or freed at the end (the adv-monitor
 * events are sent as copies, see mgmt_send_adv_monitor_device_found()).
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	/* Walk the monitored-device list: send one adv-monitor event per
	 * not-yet-notified entry matching this address, and recompute
	 * whether any device still awaits notification.
	 */
	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Finally either forward the original skb (cases 1 and 2) or
	 * release it (case 3, where only copies were sent above).
	 */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10217 
/* Forward an LE advertising report as a MESH_DEVICE_FOUND event, but
 * only if the advertisement (or scan response) contains at least one of
 * the AD types listed in hdev->mesh_ad_types. An empty filter (first
 * entry zero) accepts everything.
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* i walks AD structures (length byte + type + data);
		 * eir[i + 1] is each structure's AD type.
		 */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		/* Same AD-type scan over the scan response data. */
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: drop the report silently. */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10283 
/* Build and dispatch a DEVICE_FOUND event for a discovered device,
 * applying discovery filters (service filter, limited discoverable)
 * first. The built skb's ownership is handed off to
 * mgmt_adv_monitor_device_found(), which sends or frees it.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* In mesh mode, LE reports additionally feed the mesh path. */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Append a synthesized Class of Device EIR field when the caller
	 * supplied one and the EIR data doesn't already contain it.
	 * eir_len grows so ev->eir_len below covers the appended field.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10375 
/* Report the result of a remote-name request as a DEVICE_FOUND event.
 * When @name is non-NULL it is appended as an EIR Complete Name field;
 * otherwise the NAME_REQUEST_FAILED flag is set instead.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
	/* mgmt_alloc_skb() can fail; without this check skb_put() below
	 * would dereference a NULL pointer.
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}
10402 
/* Broadcast a Discovering event reporting the current discovery type
 * and whether discovery is starting (1) or stopping (0).
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev = {
		.type = hdev->discovery.type,
		.discovering = discovering,
	};

	bt_dev_dbg(hdev, "discovering %u", discovering);

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
10415 
/* Broadcast a Controller Suspend event carrying the suspend @state. */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev = {
		.suspend_state = state,
	};

	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
10423 
/* Broadcast a Controller Resume event. The address of the waking peer
 * is included when known (@bdaddr non-NULL), otherwise zeroed out.
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	memset(&ev.addr, 0, sizeof(ev.addr));
	ev.wake_reason = reason;

	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
10439 
/* Control channel descriptor registered with the HCI socket layer;
 * dispatches mgmt commands through the mgmt_handlers table.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10446 
/* Register the mgmt control channel with the HCI socket layer. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10451 
/* Unregister the mgmt control channel on shutdown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10456 
mgmt_cleanup(struct sock * sk)10457 void mgmt_cleanup(struct sock *sk)
10458 {
10459 	struct mgmt_mesh_tx *mesh_tx;
10460 	struct hci_dev *hdev;
10461 
10462 	read_lock(&hci_dev_list_lock);
10463 
10464 	list_for_each_entry(hdev, &hci_dev_list, list) {
10465 		do {
10466 			mesh_tx = mgmt_mesh_next(hdev, sk);
10467 
10468 			if (mesh_tx)
10469 				mesh_send_complete(hdev, mesh_tx, true);
10470 		} while (mesh_tx);
10471 	}
10472 
10473 	read_unlock(&hci_dev_list_lock);
10474 }
10475