xref: /openbmc/linux/net/bluetooth/mgmt.c (revision d32fd6bb9f2bc8178cdd65ebec1ad670a8bfa241)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* Opcodes advertised to trusted (privileged) management sockets by
 * read_commands(); untrusted sockets are given mgmt_untrusted_commands
 * instead.
 */
47 static const u16 mgmt_commands[] = {
48 	MGMT_OP_READ_INDEX_LIST,
49 	MGMT_OP_READ_INFO,
50 	MGMT_OP_SET_POWERED,
51 	MGMT_OP_SET_DISCOVERABLE,
52 	MGMT_OP_SET_CONNECTABLE,
53 	MGMT_OP_SET_FAST_CONNECTABLE,
54 	MGMT_OP_SET_BONDABLE,
55 	MGMT_OP_SET_LINK_SECURITY,
56 	MGMT_OP_SET_SSP,
57 	MGMT_OP_SET_HS,
58 	MGMT_OP_SET_LE,
59 	MGMT_OP_SET_DEV_CLASS,
60 	MGMT_OP_SET_LOCAL_NAME,
61 	MGMT_OP_ADD_UUID,
62 	MGMT_OP_REMOVE_UUID,
63 	MGMT_OP_LOAD_LINK_KEYS,
64 	MGMT_OP_LOAD_LONG_TERM_KEYS,
65 	MGMT_OP_DISCONNECT,
66 	MGMT_OP_GET_CONNECTIONS,
67 	MGMT_OP_PIN_CODE_REPLY,
68 	MGMT_OP_PIN_CODE_NEG_REPLY,
69 	MGMT_OP_SET_IO_CAPABILITY,
70 	MGMT_OP_PAIR_DEVICE,
71 	MGMT_OP_CANCEL_PAIR_DEVICE,
72 	MGMT_OP_UNPAIR_DEVICE,
73 	MGMT_OP_USER_CONFIRM_REPLY,
74 	MGMT_OP_USER_CONFIRM_NEG_REPLY,
75 	MGMT_OP_USER_PASSKEY_REPLY,
76 	MGMT_OP_USER_PASSKEY_NEG_REPLY,
77 	MGMT_OP_READ_LOCAL_OOB_DATA,
78 	MGMT_OP_ADD_REMOTE_OOB_DATA,
79 	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
80 	MGMT_OP_START_DISCOVERY,
81 	MGMT_OP_STOP_DISCOVERY,
82 	MGMT_OP_CONFIRM_NAME,
83 	MGMT_OP_BLOCK_DEVICE,
84 	MGMT_OP_UNBLOCK_DEVICE,
85 	MGMT_OP_SET_DEVICE_ID,
86 	MGMT_OP_SET_ADVERTISING,
87 	MGMT_OP_SET_BREDR,
88 	MGMT_OP_SET_STATIC_ADDRESS,
89 	MGMT_OP_SET_SCAN_PARAMS,
90 	MGMT_OP_SET_SECURE_CONN,
91 	MGMT_OP_SET_DEBUG_KEYS,
92 	MGMT_OP_SET_PRIVACY,
93 	MGMT_OP_LOAD_IRKS,
94 	MGMT_OP_GET_CONN_INFO,
95 	MGMT_OP_GET_CLOCK_INFO,
96 	MGMT_OP_ADD_DEVICE,
97 	MGMT_OP_REMOVE_DEVICE,
98 	MGMT_OP_LOAD_CONN_PARAM,
99 	MGMT_OP_READ_UNCONF_INDEX_LIST,
100 	MGMT_OP_READ_CONFIG_INFO,
101 	MGMT_OP_SET_EXTERNAL_CONFIG,
102 	MGMT_OP_SET_PUBLIC_ADDRESS,
103 	MGMT_OP_START_SERVICE_DISCOVERY,
104 	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
105 	MGMT_OP_READ_EXT_INDEX_LIST,
106 	MGMT_OP_READ_ADV_FEATURES,
107 	MGMT_OP_ADD_ADVERTISING,
108 	MGMT_OP_REMOVE_ADVERTISING,
109 	MGMT_OP_GET_ADV_SIZE_INFO,
110 	MGMT_OP_START_LIMITED_DISCOVERY,
111 	MGMT_OP_READ_EXT_INFO,
112 	MGMT_OP_SET_APPEARANCE,
113 	MGMT_OP_GET_PHY_CONFIGURATION,
114 	MGMT_OP_SET_PHY_CONFIGURATION,
115 	MGMT_OP_SET_BLOCKED_KEYS,
116 	MGMT_OP_SET_WIDEBAND_SPEECH,
117 	MGMT_OP_READ_CONTROLLER_CAP,
118 	MGMT_OP_READ_EXP_FEATURES_INFO,
119 	MGMT_OP_SET_EXP_FEATURE,
120 	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
121 	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
122 	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
123 	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
124 	MGMT_OP_GET_DEVICE_FLAGS,
125 	MGMT_OP_SET_DEVICE_FLAGS,
126 	MGMT_OP_READ_ADV_MONITOR_FEATURES,
127 	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
128 	MGMT_OP_REMOVE_ADV_MONITOR,
129 	MGMT_OP_ADD_EXT_ADV_PARAMS,
130 	MGMT_OP_ADD_EXT_ADV_DATA,
131 	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
132 	MGMT_OP_SET_MESH_RECEIVER,
133 	MGMT_OP_MESH_READ_FEATURES,
134 	MGMT_OP_MESH_SEND,
135 	MGMT_OP_MESH_SEND_CANCEL,
136 };
137 
/* Events advertised to trusted (privileged) management sockets by
 * read_commands(); untrusted sockets are given mgmt_untrusted_events
 * instead.
 */
138 static const u16 mgmt_events[] = {
139 	MGMT_EV_CONTROLLER_ERROR,
140 	MGMT_EV_INDEX_ADDED,
141 	MGMT_EV_INDEX_REMOVED,
142 	MGMT_EV_NEW_SETTINGS,
143 	MGMT_EV_CLASS_OF_DEV_CHANGED,
144 	MGMT_EV_LOCAL_NAME_CHANGED,
145 	MGMT_EV_NEW_LINK_KEY,
146 	MGMT_EV_NEW_LONG_TERM_KEY,
147 	MGMT_EV_DEVICE_CONNECTED,
148 	MGMT_EV_DEVICE_DISCONNECTED,
149 	MGMT_EV_CONNECT_FAILED,
150 	MGMT_EV_PIN_CODE_REQUEST,
151 	MGMT_EV_USER_CONFIRM_REQUEST,
152 	MGMT_EV_USER_PASSKEY_REQUEST,
153 	MGMT_EV_AUTH_FAILED,
154 	MGMT_EV_DEVICE_FOUND,
155 	MGMT_EV_DISCOVERING,
156 	MGMT_EV_DEVICE_BLOCKED,
157 	MGMT_EV_DEVICE_UNBLOCKED,
158 	MGMT_EV_DEVICE_UNPAIRED,
159 	MGMT_EV_PASSKEY_NOTIFY,
160 	MGMT_EV_NEW_IRK,
161 	MGMT_EV_NEW_CSRK,
162 	MGMT_EV_DEVICE_ADDED,
163 	MGMT_EV_DEVICE_REMOVED,
164 	MGMT_EV_NEW_CONN_PARAM,
165 	MGMT_EV_UNCONF_INDEX_ADDED,
166 	MGMT_EV_UNCONF_INDEX_REMOVED,
167 	MGMT_EV_NEW_CONFIG_OPTIONS,
168 	MGMT_EV_EXT_INDEX_ADDED,
169 	MGMT_EV_EXT_INDEX_REMOVED,
170 	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
171 	MGMT_EV_ADVERTISING_ADDED,
172 	MGMT_EV_ADVERTISING_REMOVED,
173 	MGMT_EV_EXT_INFO_CHANGED,
174 	MGMT_EV_PHY_CONFIGURATION_CHANGED,
175 	MGMT_EV_EXP_FEATURE_CHANGED,
176 	MGMT_EV_DEVICE_FLAGS_CHANGED,
177 	MGMT_EV_ADV_MONITOR_ADDED,
178 	MGMT_EV_ADV_MONITOR_REMOVED,
179 	MGMT_EV_CONTROLLER_SUSPEND,
180 	MGMT_EV_CONTROLLER_RESUME,
181 	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
182 	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
183 };
184 
/* Read-only subset of opcodes reported to untrusted sockets by
 * read_commands() (sockets without HCI_SOCK_TRUSTED).
 */
185 static const u16 mgmt_untrusted_commands[] = {
186 	MGMT_OP_READ_INDEX_LIST,
187 	MGMT_OP_READ_INFO,
188 	MGMT_OP_READ_UNCONF_INDEX_LIST,
189 	MGMT_OP_READ_CONFIG_INFO,
190 	MGMT_OP_READ_EXT_INDEX_LIST,
191 	MGMT_OP_READ_EXT_INFO,
192 	MGMT_OP_READ_CONTROLLER_CAP,
193 	MGMT_OP_READ_EXP_FEATURES_INFO,
194 	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
195 	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
196 };
197 
/* Subset of events reported to untrusted sockets by read_commands()
 * (sockets without HCI_SOCK_TRUSTED).
 */
198 static const u16 mgmt_untrusted_events[] = {
199 	MGMT_EV_INDEX_ADDED,
200 	MGMT_EV_INDEX_REMOVED,
201 	MGMT_EV_NEW_SETTINGS,
202 	MGMT_EV_CLASS_OF_DEV_CHANGED,
203 	MGMT_EV_LOCAL_NAME_CHANGED,
204 	MGMT_EV_UNCONF_INDEX_ADDED,
205 	MGMT_EV_UNCONF_INDEX_REMOVED,
206 	MGMT_EV_NEW_CONFIG_OPTIONS,
207 	MGMT_EV_EXT_INDEX_ADDED,
208 	MGMT_EV_EXT_INDEX_REMOVED,
209 	MGMT_EV_EXT_INFO_CHANGED,
210 	MGMT_EV_EXP_FEATURE_CHANGED,
211 };
212 
213 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
214 
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217 
218 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code (see mgmt_status()); the
 * position of each entry is therefore significant and must match the
 * HCI error code numbering.
 */
219 static const u8 mgmt_status_table[] = {
220 	MGMT_STATUS_SUCCESS,
221 	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
222 	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
223 	MGMT_STATUS_FAILED,		/* Hardware Failure */
224 	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
225 	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
226 	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
227 	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
228 	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
229 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
230 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
231 	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
232 	MGMT_STATUS_BUSY,		/* Command Disallowed */
233 	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
234 	MGMT_STATUS_REJECTED,		/* Rejected Security */
235 	MGMT_STATUS_REJECTED,		/* Rejected Personal */
236 	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
237 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
238 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
239 	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
240 	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
241 	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
242 	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
243 	MGMT_STATUS_BUSY,		/* Repeated Attempts */
244 	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
245 	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
246 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
247 	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
248 	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
249 	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
250 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
251 	MGMT_STATUS_FAILED,		/* Unspecified Error */
252 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
253 	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
254 	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
255 	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
256 	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
257 	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
258 	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
259 	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
260 	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
261 	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
262 	MGMT_STATUS_FAILED,		/* Transaction Collision */
263 	MGMT_STATUS_FAILED,		/* Reserved for future use */
264 	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
265 	MGMT_STATUS_REJECTED,		/* QoS Rejected */
266 	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
267 	MGMT_STATUS_REJECTED,		/* Insufficient Security */
268 	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
269 	MGMT_STATUS_FAILED,		/* Reserved for future use */
270 	MGMT_STATUS_BUSY,		/* Role Switch Pending */
271 	MGMT_STATUS_FAILED,		/* Reserved for future use */
272 	MGMT_STATUS_FAILED,		/* Slot Violation */
273 	MGMT_STATUS_FAILED,		/* Role Switch Failed */
274 	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
275 	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
276 	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
277 	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
278 	MGMT_STATUS_BUSY,		/* Controller Busy */
279 	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
280 	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
281 	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
282 	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
283 	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
284 };
285 
mgmt_errno_status(int err)286 static u8 mgmt_errno_status(int err)
287 {
288 	switch (err) {
289 	case 0:
290 		return MGMT_STATUS_SUCCESS;
291 	case -EPERM:
292 		return MGMT_STATUS_REJECTED;
293 	case -EINVAL:
294 		return MGMT_STATUS_INVALID_PARAMS;
295 	case -EOPNOTSUPP:
296 		return MGMT_STATUS_NOT_SUPPORTED;
297 	case -EBUSY:
298 		return MGMT_STATUS_BUSY;
299 	case -ETIMEDOUT:
300 		return MGMT_STATUS_AUTH_FAILED;
301 	case -ENOMEM:
302 		return MGMT_STATUS_NO_RESOURCES;
303 	case -EISCONN:
304 		return MGMT_STATUS_ALREADY_CONNECTED;
305 	case -ENOTCONN:
306 		return MGMT_STATUS_DISCONNECTED;
307 	}
308 
309 	return MGMT_STATUS_FAILED;
310 }
311 
mgmt_status(int err)312 static u8 mgmt_status(int err)
313 {
314 	if (err < 0)
315 		return mgmt_errno_status(err);
316 
317 	if (err < ARRAY_SIZE(mgmt_status_table))
318 		return mgmt_status_table[err];
319 
320 	return MGMT_STATUS_FAILED;
321 }
322 
/* Broadcast an index-related event on the control channel to every
 * socket matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329 
/* Send an event on the control channel restricted by @flag, optionally
 * skipping the socket that triggered it (@skip_sk).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336 
/* Send an event on the control channel to trusted sockets only,
 * optionally skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343 
mgmt_event_skb(struct sk_buff * skb,struct sock * skip_sk)344 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
345 {
346 	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
347 				   skip_sk);
348 }
349 
/* Translate a mgmt address type into the HCI LE address type; anything
 * that is not BDADDR_LE_PUBLIC is treated as a random address.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return mgmt_addr_type == BDADDR_LE_PUBLIC ? ADDR_LE_DEV_PUBLIC :
						    ADDR_LE_DEV_RANDOM;
}
357 
mgmt_fill_version_info(void * ver)358 void mgmt_fill_version_info(void *ver)
359 {
360 	struct mgmt_rp_read_version *rp = ver;
361 
362 	rp->version = MGMT_VERSION;
363 	rp->revision = cpu_to_le16(MGMT_REVISION);
364 }
365 
/* Handle MGMT_OP_READ_VERSION: reply with the management interface
 * version/revision. @data/@data_len are unused for this command.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378 
/* Handle MGMT_OP_READ_COMMANDS: reply with the list of supported
 * command and event opcodes. Trusted sockets see the full tables,
 * untrusted sockets only the read-only subset.
 *
 * Improvements over the previous version: the trusted/untrusted choice
 * selects table pointers once, so the fill loops are no longer
 * duplicated, and the reply size is computed with the overflow-safe
 * struct_size() helper (consistent with read_ext_index_list()).
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *commands, *events;
	u16 num_commands, num_events;
	size_t rp_size;
	__le16 *opcode;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		commands = mgmt_commands;
		events = mgmt_events;
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		commands = mgmt_untrusted_commands;
		events = mgmt_untrusted_events;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = struct_size(rp, opcodes, num_commands + num_events);

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Opcodes are packed as unaligned little-endian values: commands
	 * first, then events.
	 */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(events[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
430 
/* Handle MGMT_OP_READ_INDEX_LIST: reply with the indexes of all
 * configured controllers.
 *
 * Improvement: the open-coded "sizeof(*rp) + (2 * count)" arithmetic is
 * replaced by the overflow-safe struct_size() helper, which also no
 * longer hardcodes the element size of the flexible index[] array
 * (consistent with read_ext_index_list()).
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Upper bound for the allocation; devices skipped in the fill
	 * loop below simply leave the buffer over-sized.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp = kmalloc(struct_size(rp, index, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Reply length uses the final count, which may be smaller than
	 * the allocation.
	 */
	rp_len = struct_size(rp, index, count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
488 
/* Handle MGMT_OP_READ_UNCONF_INDEX_LIST: reply with the indexes of all
 * controllers that still require configuration.
 *
 * Improvement: the open-coded "sizeof(*rp) + (2 * count)" arithmetic is
 * replaced by the overflow-safe struct_size() helper, matching
 * read_ext_index_list().
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Upper bound for the allocation; devices skipped below simply
	 * leave the buffer over-sized.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp = kmalloc(struct_size(rp, index, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = struct_size(rp, index, count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546 
/* Handle MGMT_OP_READ_EXT_INDEX_LIST: reply with all controller
 * indexes including their type (configured/unconfigured) and bus.
 * Using this command switches the socket to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Worst-case allocation: every registered controller. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Raw-only devices are neither configured nor
		 * unconfigured controllers, so skip them entirely.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		rp->entry[count].type =
			hci_dev_test_flag(d, HCI_UNCONFIGURED) ? 0x01 : 0x00;
		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
612 
is_configured(struct hci_dev * hdev)613 static bool is_configured(struct hci_dev *hdev)
614 {
615 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
616 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 		return false;
618 
619 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
620 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
621 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
622 		return false;
623 
624 	return true;
625 }
626 
get_missing_options(struct hci_dev * hdev)627 static __le32 get_missing_options(struct hci_dev *hdev)
628 {
629 	u32 options = 0;
630 
631 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
632 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
633 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
634 
635 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
636 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
637 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
638 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
639 
640 	return cpu_to_le32(options);
641 }
642 
new_options(struct hci_dev * hdev,struct sock * skip)643 static int new_options(struct hci_dev *hdev, struct sock *skip)
644 {
645 	__le32 options = get_missing_options(hdev);
646 
647 	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
648 				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
649 }
650 
/* Complete @opcode with the current missing-options bitmask as the
 * reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
658 
/* Handle MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer id plus
 * the supported and currently missing configuration options.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* External configuration is an option only with the matching
	 * quirk; a public address can only be configured when the driver
	 * provides a set_bdaddr callback.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
686 
get_supported_phys(struct hci_dev * hdev)687 static u32 get_supported_phys(struct hci_dev *hdev)
688 {
689 	u32 supported_phys = 0;
690 
691 	if (lmp_bredr_capable(hdev)) {
692 		supported_phys |= MGMT_PHY_BR_1M_1SLOT;
693 
694 		if (hdev->features[0][0] & LMP_3SLOT)
695 			supported_phys |= MGMT_PHY_BR_1M_3SLOT;
696 
697 		if (hdev->features[0][0] & LMP_5SLOT)
698 			supported_phys |= MGMT_PHY_BR_1M_5SLOT;
699 
700 		if (lmp_edr_2m_capable(hdev)) {
701 			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
702 
703 			if (lmp_edr_3slot_capable(hdev))
704 				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
705 
706 			if (lmp_edr_5slot_capable(hdev))
707 				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
708 
709 			if (lmp_edr_3m_capable(hdev)) {
710 				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
711 
712 				if (lmp_edr_3slot_capable(hdev))
713 					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
714 
715 				if (lmp_edr_5slot_capable(hdev))
716 					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
717 			}
718 		}
719 	}
720 
721 	if (lmp_le_capable(hdev)) {
722 		supported_phys |= MGMT_PHY_LE_1M_TX;
723 		supported_phys |= MGMT_PHY_LE_1M_RX;
724 
725 		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
726 			supported_phys |= MGMT_PHY_LE_2M_TX;
727 			supported_phys |= MGMT_PHY_LE_2M_RX;
728 		}
729 
730 		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
731 			supported_phys |= MGMT_PHY_LE_CODED_TX;
732 			supported_phys |= MGMT_PHY_LE_CODED_RX;
733 		}
734 	}
735 
736 	return supported_phys;
737 }
738 
get_selected_phys(struct hci_dev * hdev)739 static u32 get_selected_phys(struct hci_dev *hdev)
740 {
741 	u32 selected_phys = 0;
742 
743 	if (lmp_bredr_capable(hdev)) {
744 		selected_phys |= MGMT_PHY_BR_1M_1SLOT;
745 
746 		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
747 			selected_phys |= MGMT_PHY_BR_1M_3SLOT;
748 
749 		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
750 			selected_phys |= MGMT_PHY_BR_1M_5SLOT;
751 
752 		if (lmp_edr_2m_capable(hdev)) {
753 			if (!(hdev->pkt_type & HCI_2DH1))
754 				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
755 
756 			if (lmp_edr_3slot_capable(hdev) &&
757 			    !(hdev->pkt_type & HCI_2DH3))
758 				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
759 
760 			if (lmp_edr_5slot_capable(hdev) &&
761 			    !(hdev->pkt_type & HCI_2DH5))
762 				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
763 
764 			if (lmp_edr_3m_capable(hdev)) {
765 				if (!(hdev->pkt_type & HCI_3DH1))
766 					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
767 
768 				if (lmp_edr_3slot_capable(hdev) &&
769 				    !(hdev->pkt_type & HCI_3DH3))
770 					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
771 
772 				if (lmp_edr_5slot_capable(hdev) &&
773 				    !(hdev->pkt_type & HCI_3DH5))
774 					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
775 			}
776 		}
777 	}
778 
779 	if (lmp_le_capable(hdev)) {
780 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
781 			selected_phys |= MGMT_PHY_LE_1M_TX;
782 
783 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
784 			selected_phys |= MGMT_PHY_LE_1M_RX;
785 
786 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
787 			selected_phys |= MGMT_PHY_LE_2M_TX;
788 
789 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
790 			selected_phys |= MGMT_PHY_LE_2M_RX;
791 
792 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
793 			selected_phys |= MGMT_PHY_LE_CODED_TX;
794 
795 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
796 			selected_phys |= MGMT_PHY_LE_CODED_RX;
797 	}
798 
799 	return selected_phys;
800 }
801 
get_configurable_phys(struct hci_dev * hdev)802 static u32 get_configurable_phys(struct hci_dev *hdev)
803 {
804 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
805 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
806 }
807 
get_supported_settings(struct hci_dev * hdev)808 static u32 get_supported_settings(struct hci_dev *hdev)
809 {
810 	u32 settings = 0;
811 
812 	settings |= MGMT_SETTING_POWERED;
813 	settings |= MGMT_SETTING_BONDABLE;
814 	settings |= MGMT_SETTING_DEBUG_KEYS;
815 	settings |= MGMT_SETTING_CONNECTABLE;
816 	settings |= MGMT_SETTING_DISCOVERABLE;
817 
818 	if (lmp_bredr_capable(hdev)) {
819 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
820 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
821 		settings |= MGMT_SETTING_BREDR;
822 		settings |= MGMT_SETTING_LINK_SECURITY;
823 
824 		if (lmp_ssp_capable(hdev)) {
825 			settings |= MGMT_SETTING_SSP;
826 		}
827 
828 		if (lmp_sc_capable(hdev))
829 			settings |= MGMT_SETTING_SECURE_CONN;
830 
831 		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
832 			     &hdev->quirks))
833 			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
834 	}
835 
836 	if (lmp_le_capable(hdev)) {
837 		settings |= MGMT_SETTING_LE;
838 		settings |= MGMT_SETTING_SECURE_CONN;
839 		settings |= MGMT_SETTING_PRIVACY;
840 		settings |= MGMT_SETTING_STATIC_ADDRESS;
841 		settings |= MGMT_SETTING_ADVERTISING;
842 	}
843 
844 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
845 	    hdev->set_bdaddr)
846 		settings |= MGMT_SETTING_CONFIGURATION;
847 
848 	if (cis_central_capable(hdev))
849 		settings |= MGMT_SETTING_CIS_CENTRAL;
850 
851 	if (cis_peripheral_capable(hdev))
852 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
853 
854 	settings |= MGMT_SETTING_PHY_CONFIGURATION;
855 
856 	return settings;
857 }
858 
get_current_settings(struct hci_dev * hdev)859 static u32 get_current_settings(struct hci_dev *hdev)
860 {
861 	u32 settings = 0;
862 
863 	if (hdev_is_powered(hdev))
864 		settings |= MGMT_SETTING_POWERED;
865 
866 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
867 		settings |= MGMT_SETTING_CONNECTABLE;
868 
869 	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
870 		settings |= MGMT_SETTING_FAST_CONNECTABLE;
871 
872 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
873 		settings |= MGMT_SETTING_DISCOVERABLE;
874 
875 	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
876 		settings |= MGMT_SETTING_BONDABLE;
877 
878 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
879 		settings |= MGMT_SETTING_BREDR;
880 
881 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
882 		settings |= MGMT_SETTING_LE;
883 
884 	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
885 		settings |= MGMT_SETTING_LINK_SECURITY;
886 
887 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
888 		settings |= MGMT_SETTING_SSP;
889 
890 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
891 		settings |= MGMT_SETTING_ADVERTISING;
892 
893 	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
894 		settings |= MGMT_SETTING_SECURE_CONN;
895 
896 	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
897 		settings |= MGMT_SETTING_DEBUG_KEYS;
898 
899 	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
900 		settings |= MGMT_SETTING_PRIVACY;
901 
902 	/* The current setting for static address has two purposes. The
903 	 * first is to indicate if the static address will be used and
904 	 * the second is to indicate if it is actually set.
905 	 *
906 	 * This means if the static address is not configured, this flag
907 	 * will never be set. If the address is configured, then if the
908 	 * address is actually used decides if the flag is set or not.
909 	 *
910 	 * For single mode LE only controllers and dual-mode controllers
911 	 * with BR/EDR disabled, the existence of the static address will
912 	 * be evaluated.
913 	 */
914 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
915 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
916 	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
917 		if (bacmp(&hdev->static_addr, BDADDR_ANY))
918 			settings |= MGMT_SETTING_STATIC_ADDRESS;
919 	}
920 
921 	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
922 		settings |= MGMT_SETTING_WIDEBAND_SPEECH;
923 
924 	if (cis_central_capable(hdev))
925 		settings |= MGMT_SETTING_CIS_CENTRAL;
926 
927 	if (cis_peripheral_capable(hdev))
928 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
929 
930 	if (bis_capable(hdev))
931 		settings |= MGMT_SETTING_ISO_BROADCASTER;
932 
933 	if (sync_recv_capable(hdev))
934 		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
935 
936 	return settings;
937 }
938 
/* Look up a pending mgmt command for @hdev on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
943 
mgmt_get_adv_discov_flags(struct hci_dev * hdev)944 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
945 {
946 	struct mgmt_pending_cmd *cmd;
947 
948 	/* If there's a pending mgmt command the flags will not yet have
949 	 * their final values, so check for this first.
950 	 */
951 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
952 	if (cmd) {
953 		struct mgmt_mode *cp = cmd->param;
954 		if (cp->val == 0x01)
955 			return LE_AD_GENERAL;
956 		else if (cp->val == 0x02)
957 			return LE_AD_LIMITED;
958 	} else {
959 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
960 			return LE_AD_LIMITED;
961 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
962 			return LE_AD_GENERAL;
963 	}
964 
965 	return 0;
966 }
967 
mgmt_get_connectable(struct hci_dev * hdev)968 bool mgmt_get_connectable(struct hci_dev *hdev)
969 {
970 	struct mgmt_pending_cmd *cmd;
971 
972 	/* If there's a pending mgmt command the flag will not yet have
973 	 * it's final value, so check for this first.
974 	 */
975 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
976 	if (cmd) {
977 		struct mgmt_mode *cp = cmd->param;
978 
979 		return cp->val;
980 	}
981 
982 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
983 }
984 
/* hci_cmd_sync work: refresh the EIR data and the class of device after
 * the service cache has been invalidated.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
992 
service_cache_off(struct work_struct * work)993 static void service_cache_off(struct work_struct *work)
994 {
995 	struct hci_dev *hdev = container_of(work, struct hci_dev,
996 					    service_cache.work);
997 
998 	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
999 		return;
1000 
1001 	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1002 }
1003 
rpa_expired_sync(struct hci_dev * hdev,void * data)1004 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1005 {
1006 	/* The generation of a new RPA and programming it into the
1007 	 * controller happens in the hci_req_enable_advertising()
1008 	 * function.
1009 	 */
1010 	if (ext_adv_capable(hdev))
1011 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1012 	else
1013 		return hci_enable_advertising_sync(hdev);
1014 }
1015 
rpa_expired(struct work_struct * work)1016 static void rpa_expired(struct work_struct *work)
1017 {
1018 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1019 					    rpa_expired.work);
1020 
1021 	bt_dev_dbg(hdev, "");
1022 
1023 	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1024 
1025 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1026 		return;
1027 
1028 	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1029 }
1030 
1031 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1032 
/* Delayed work: the discoverable timeout expired. Clears the discoverable
 * state, re-applies it to the controller and notifies mgmt listeners.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	/* Push the now-cleared discoverable state to the controller. */
	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1057 
1058 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1059 
/* Finish a mesh transmit: optionally emit the Mesh Packet Complete event
 * and release the tx entry.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	/* Copy the handle first: mgmt_mesh_remove() below releases
	 * mesh_tx, so it must not be dereferenced afterwards.
	 */
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1071 
/* hci_cmd_sync work: stop the current mesh transmission and complete the
 * head-of-queue tx entry, if any.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1085 
1086 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1087 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion callback for mesh_send_done_sync(): kick off the next queued
 * mesh transmission, if one is pending.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	/* If the send could not even be queued, complete (and free) the
	 * entry so it does not linger; otherwise mark sending active.
	 */
	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1103 
mesh_send_done(struct work_struct * work)1104 static void mesh_send_done(struct work_struct *work)
1105 {
1106 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1107 					    mesh_send_done.work);
1108 
1109 	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1110 		return;
1111 
1112 	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1113 }
1114 
/* One-time per-controller mgmt initialization, performed the first time a
 * mgmt socket touches the device. Idempotent via the HCI_MGMT flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	/* Set up the delayed works driven by the mgmt state machine. */
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1136 
/* MGMT_OP_READ_INFO handler: report address, version, manufacturer,
 * settings, class of device and names for the controller.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Snapshot all fields under hdev->lock for a consistent view. */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1166 
/* Build the EIR blob used by Read Extended Info / Ext Info Changed:
 * class of device (BR/EDR only), appearance (LE only) and both names.
 * Returns the number of bytes written to @eir.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1190 
/* MGMT_OP_READ_EXT_INFO handler: like Read Info, but with a variable
 * length EIR blob appended. Calling it switches the socket to the
 * extended-info event model.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Fixed header plus variable EIR data share one stack buffer;
	 * 512 bytes must hold sizeof(*rp) + the EIR built below.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1230 
/* Emit MGMT_EV_EXT_INFO_CHANGED to all sockets that opted into extended
 * info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	/* Event header plus variable EIR data share one stack buffer. */
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1246 
/* Reply to a settings-changing command with the (little-endian) current
 * settings bitmask as the command-complete parameter.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1254 
/* Notify mgmt listeners (except the originating socket @sk) that
 * advertising instance @instance was added.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1263 
/* Notify mgmt listeners (except the originating socket @sk) that
 * advertising instance @instance was removed.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1273 
cancel_adv_timeout(struct hci_dev * hdev)1274 static void cancel_adv_timeout(struct hci_dev *hdev)
1275 {
1276 	if (hdev->adv_instance_timeout) {
1277 		hdev->adv_instance_timeout = 0;
1278 		cancel_delayed_work(&hdev->adv_instance_expire);
1279 	}
1280 }
1281 
/* Re-arm the pending-connection/report lists from the stored LE
 * connection parameters, e.g. after power on.
 *
 * This function requires the caller holds hdev->lock.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		/* Re-add to the list matching the auto-connect policy. */
		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1306 
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all sockets subscribed to setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1314 
/* Completion callback for set_powered_sync(): reply to the pending
 * Set Powered command and, on power on, restore LE state.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* Power on: re-arm LE auto-connections and
			 * restart passive scanning.
			 */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1351 
/* hci_cmd_sync work for Set Powered: apply the requested power state. */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1367 
/* MGMT_OP_SET_POWERED handler: validate the request, record it as
 * pending and queue the synchronous power change.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x00 (off) and 0x01 (on) are valid modes. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered may be outstanding at a time. */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1418 
/* Public wrapper: broadcast the current settings to all subscribers. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1423 
/* Context passed through mgmt_pending_foreach() callbacks. */
struct cmd_lookup {
	struct sock *sk;	/* first responding socket; settings_rsp()
				 * stores it here with a reference held
				 */
	struct hci_dev *hdev;
	u8 mgmt_status;		/* status used by cmd_complete_rsp() */
};
1429 
/* mgmt_pending_foreach() callback: answer the pending command with the
 * current settings, detach it from the pending list and remember the
 * first socket (with a held reference) in the lookup context.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	/* The entry was unlinked above, so free rather than remove. */
	mgmt_pending_free(cmd);
}
1445 
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status pointed to by @data and drop it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1453 
/* mgmt_pending_foreach() callback: complete the pending command via its
 * own cmd_complete handler when it has one, otherwise fall back to a
 * plain status response.
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		mgmt_pending_remove(cmd);

		return;
	}

	/* No dedicated handler: reuse the status-only responder. Note
	 * that cmd_status_rsp() reads data as a u8 pointer, which here
	 * aliases cmd_lookup's first byte-sized use via mgmt_status —
	 * it relies on mgmt_status being passed the same way; see the
	 * cmd_lookup definition.
	 */
	cmd_status_rsp(cmd, data);
}
1472 
/* Generic cmd_complete handler: echo the original parameters back. */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1478 
/* cmd_complete handler for address-based commands: return only the
 * leading mgmt_addr_info part of the original parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1484 
mgmt_bredr_support(struct hci_dev * hdev)1485 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1486 {
1487 	if (!lmp_bredr_capable(hdev))
1488 		return MGMT_STATUS_NOT_SUPPORTED;
1489 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1490 		return MGMT_STATUS_REJECTED;
1491 	else
1492 		return MGMT_STATUS_SUCCESS;
1493 }
1494 
mgmt_le_support(struct hci_dev * hdev)1495 static u8 mgmt_le_support(struct hci_dev *hdev)
1496 {
1497 	if (!lmp_le_capable(hdev))
1498 		return MGMT_STATUS_NOT_SUPPORTED;
1499 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1500 		return MGMT_STATUS_REJECTED;
1501 	else
1502 		return MGMT_STATUS_SUCCESS;
1503 }
1504 
/* Completion callback for set_discoverable_sync(): answer the pending
 * Set Discoverable command and, on success, arm the discoverable
 * timeout if one was requested.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the optimistic limited-discoverable flag. */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the timeout that will turn discoverable off again. */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1539 
/* hci_cmd_sync work: push the discoverable state to the controller. */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1546 
/* MGMT_OP_SET_DISCOVERABLE handler: validate mode (0x00 off, 0x01
 * general, 0x02 limited) and timeout, handle the powered-off and
 * timeout-only cases without HCI traffic, otherwise update the flags
 * and queue the synchronous controller update.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Discoverable requires at least one enabled transport. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes must not overlap. */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable. */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1679 
mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1680 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1681 					  int err)
1682 {
1683 	struct mgmt_pending_cmd *cmd = data;
1684 
1685 	bt_dev_dbg(hdev, "err %d", err);
1686 
1687 	/* Make sure cmd still outstanding. */
1688 	if (err == -ECANCELED ||
1689 	    cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1690 		return;
1691 
1692 	hci_dev_lock(hdev);
1693 
1694 	if (err) {
1695 		u8 mgmt_err = mgmt_status(err);
1696 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1697 		goto done;
1698 	}
1699 
1700 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1701 	new_settings(hdev, cmd->sk);
1702 
1703 done:
1704 	if (cmd)
1705 		mgmt_pending_remove(cmd);
1706 
1707 	hci_dev_unlock(hdev);
1708 }
1709 
/* Apply a Set Connectable request purely at the flag level (used while
 * powered off, when no HCI traffic is possible), reply with the new
 * settings and broadcast them if anything changed.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also drops discoverable. */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1738 
/* hci_cmd_sync work: push the connectable state to the controller. */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1745 
/* MGMT_OP_SET_CONNECTABLE handler: validate the request, update the
 * flags (flag-only while powered off) and queue the synchronous
 * controller update.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Connectable requires at least one enabled transport. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* While powered off only the flags need changing. */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Discoverable and connectable changes must not overlap. */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also ends discoverable mode and
		 * its pending timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1805 
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag; no HCI
 * command queueing is needed, but discoverable data may need refreshing.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Test-and-set/clear reports whether the flag actually flipped. */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1843 
/* MGMT_OP_SET_LINK_SECURITY handler: toggle BR/EDR link-level security
 * (authentication). While powered off only the flag is updated; when
 * powered, HCI_OP_WRITE_AUTH_ENABLE is sent and the reply path
 * completes the pending command.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Link security is a BR/EDR-only feature. */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* While powered off only the flag needs updating. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested state. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0)
		mgmt_pending_remove(cmd);
	/* NOTE: the old code ended this branch with a "goto failed" that
	 * jumped to the immediately following label — dead control flow,
	 * now removed; behavior is unchanged.
	 */

failed:
	hci_dev_unlock(hdev);
	return err;
}
1912 
/* Completion callback for set_ssp_sync(): reconcile the HCI_SSP_ENABLED
 * flag with the outcome, answer every pending Set SSP command and
 * broadcast new settings when the flag changed.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Enable failed: roll back the flag and let listeners
		 * know the settings did not change after all.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	/* Reply to all pending Set SSP commands with the new settings. */
	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp() held a reference on the first socket. */
	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1954 
/* hci_cmd_sync work for Set SSP: optimistically set the flag, write the
 * SSP mode to the controller and undo the flag on failure.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* Only roll back if we were the ones who set the flag above. */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1972 
/* MGMT_OP_SET_SSP handler: validate support and the requested mode,
 * handle the powered-off case at the flag level, otherwise queue the
 * synchronous SSP mode write.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SSP is a BR/EDR feature and needs controller support. */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* While powered off only the flag needs updating. */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2047 
/* MGMT_OP_SET_HS handler: High Speed support is unconditionally
 * rejected; this command always fails with Not Supported.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	bt_dev_dbg(hdev, "sock %p", sk);

	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
			       MGMT_STATUS_NOT_SUPPORTED);
}
2055 
set_le_complete(struct hci_dev * hdev,void * data,int err)2056 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2057 {
2058 	struct cmd_lookup match = { NULL, hdev };
2059 	u8 status = mgmt_status(err);
2060 
2061 	bt_dev_dbg(hdev, "err %d", err);
2062 
2063 	if (status) {
2064 		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2065 							&status);
2066 		return;
2067 	}
2068 
2069 	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2070 
2071 	new_settings(hdev, match.sk);
2072 
2073 	if (match.sk)
2074 		sock_put(match.sk);
2075 }
2076 
/* hci_cmd_sync callback for MGMT_OP_SET_LE: write the LE host support
 * setting and, when disabling, tear down any active advertising first.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Disabling LE: clear and remove advertising instances */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2120 
/* hci_cmd_sync completion for MGMT_OP_SET_MESH_RECEIVER.
 *
 * On failure, every pending SET_MESH_RECEIVER command is answered with
 * the error status; on success only this command is completed.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Remove before replying; sk was saved above so it is still valid */
	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2136 
set_mesh_sync(struct hci_dev * hdev,void * data)2137 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2138 {
2139 	struct mgmt_pending_cmd *cmd = data;
2140 	struct mgmt_cp_set_mesh *cp = cmd->param;
2141 	size_t len = cmd->param_len;
2142 
2143 	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2144 
2145 	if (cp->enable)
2146 		hci_dev_set_flag(hdev, HCI_MESH);
2147 	else
2148 		hci_dev_clear_flag(hdev, HCI_MESH);
2149 
2150 	len -= sizeof(*cp);
2151 
2152 	/* If filters don't fit, forward all adv pkts */
2153 	if (len <= sizeof(hdev->mesh_ad_types))
2154 		memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2155 
2156 	hci_update_passive_scan_sync(hdev);
2157 	return 0;
2158 }
2159 
/* MGMT_OP_SET_MESH_RECEIVER handler: queue set_mesh_sync to update the
 * mesh receiver state (HCI_MESH flag plus AD-type filter).  Requires an
 * LE capable controller with the mesh experimental feature enabled.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2197 
/* hci_cmd_sync completion for mesh_send_sync.
 *
 * Failures are reported to the owner via mesh_send_complete(); success
 * is not reported here — instead the mesh_send_done work is scheduled
 * to run after 25 ms per requested transmission count.
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2218 
/* hci_cmd_sync callback that turns a queued mesh TX into an advertising
 * instance and schedules it.
 *
 * The instance number used is le_num_of_adv_sets + 1, i.e. one past the
 * range used by regular advertising (presumably reserved for mesh TX —
 * verify against the adv instance allocation elsewhere in this file).
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* All regular advertising slots in use: cannot transmit now */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	/* instance != 0 means we still need to kick off scheduling */
	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2272 
send_count(struct mgmt_mesh_tx * mesh_tx,void * data)2273 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2274 {
2275 	struct mgmt_rp_mesh_read_features *rp = data;
2276 
2277 	if (rp->used_handles >= rp->max_handles)
2278 		return;
2279 
2280 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2281 }
2282 
/* MGMT_OP_MESH_READ_FEATURES handler.
 *
 * Replies with the maximum number of mesh TX handles (zero unless LE is
 * enabled) and the handles currently owned by the requesting socket.
 * The reply is trimmed so only the used handle slots are transmitted.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	/* Collect the handles of this socket's pending transmissions */
	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2309 
send_cancel(struct hci_dev * hdev,void * data)2310 static int send_cancel(struct hci_dev *hdev, void *data)
2311 {
2312 	struct mgmt_pending_cmd *cmd = data;
2313 	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2314 	struct mgmt_mesh_tx *mesh_tx;
2315 
2316 	if (!cancel->handle) {
2317 		do {
2318 			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2319 
2320 			if (mesh_tx)
2321 				mesh_send_complete(hdev, mesh_tx, false);
2322 		} while (mesh_tx);
2323 	} else {
2324 		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2325 
2326 		if (mesh_tx && mesh_tx->sk == cmd->sk)
2327 			mesh_send_complete(hdev, mesh_tx, false);
2328 	}
2329 
2330 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2331 			  0, NULL, 0);
2332 	mgmt_pending_free(cmd);
2333 
2334 	return 0;
2335 }
2336 
/* MGMT_OP_MESH_SEND_CANCEL handler: queue send_cancel to abort one
 * (by handle) or all (handle 0) of the requester's pending mesh
 * transmissions.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	/* mgmt_pending_new (not _add): send_cancel frees the cmd itself */
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2370 
/* MGMT_OP_MESH_SEND handler: queue a mesh packet for transmission via
 * advertising.  The AD payload is limited to 31 bytes and at most
 * MESH_HANDLES_MAX transmissions may be outstanding per socket.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Count this socket's outstanding sends to enforce the limit */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	/* If a send is already in progress, just queue; the done work will
	 * pick the next TX up.  Otherwise kick off mesh_send_sync.
	 */
	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		/* NOTE(review): err < 0 with mesh_tx set can only come from a
		 * queue failure, which happens when !sending — yet the remove
		 * below runs only when sending, so a failed queue appears to
		 * leave mesh_tx on the list.  Verify whether this condition
		 * is intentionally inverted before changing it.
		 */
		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2431 
/* MGMT_OP_SET_LE handler: enable or disable Low Energy support.
 *
 * LE-only configurations (HCI_BREDR_ENABLED clear) cannot switch LE
 * off.  When powered off, or when the host LE state already matches,
 * only the flags are updated; otherwise set_le_sync is queued and
 * set_le_complete finishes the command.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		/* Flag-only update; the controller is untouched */
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE implies disabling advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* SET_ADVERTISING also touches LE state, so it blocks us too */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2520 
2521 /* This is a helper function to test for pending mgmt commands that can
2522  * cause CoD or EIR HCI commands. We can only allow one such pending
2523  * mgmt command at a time since otherwise we cannot easily track what
2524  * the current values are, will be, and based on that calculate if a new
2525  * HCI command needs to be sent and if yes with what value.
2526  */
pending_eir_or_class(struct hci_dev * hdev)2527 static bool pending_eir_or_class(struct hci_dev *hdev)
2528 {
2529 	struct mgmt_pending_cmd *cmd;
2530 
2531 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2532 		switch (cmd->opcode) {
2533 		case MGMT_OP_ADD_UUID:
2534 		case MGMT_OP_REMOVE_UUID:
2535 		case MGMT_OP_SET_DEV_CLASS:
2536 		case MGMT_OP_SET_POWERED:
2537 			return true;
2538 		}
2539 	}
2540 
2541 	return false;
2542 }
2543 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; the last four bytes hold the 16/32-bit
 * short form checked by get_uuid_size().
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2548 
get_uuid_size(const u8 * uuid)2549 static u8 get_uuid_size(const u8 *uuid)
2550 {
2551 	u32 val;
2552 
2553 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2554 		return 128;
2555 
2556 	val = get_unaligned_le32(&uuid[12]);
2557 	if (val > 0xffff)
2558 		return 32;
2559 
2560 	return 16;
2561 }
2562 
mgmt_class_complete(struct hci_dev * hdev,void * data,int err)2563 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2564 {
2565 	struct mgmt_pending_cmd *cmd = data;
2566 
2567 	bt_dev_dbg(hdev, "err %d", err);
2568 
2569 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2570 			  mgmt_status(err), hdev->dev_class, 3);
2571 
2572 	mgmt_pending_free(cmd);
2573 }
2574 
/* hci_cmd_sync callback for MGMT_OP_ADD_UUID: refresh the Class of
 * Device first, then the EIR data.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int ret = hci_update_class_sync(hdev);

	return ret ? ret : hci_update_eir_sync(hdev);
}
2585 
/* MGMT_OP_ADD_UUID handler: record a new service UUID and schedule an
 * update of the Class of Device and EIR data.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2635 
enable_service_cache(struct hci_dev * hdev)2636 static bool enable_service_cache(struct hci_dev *hdev)
2637 {
2638 	if (!hdev_is_powered(hdev))
2639 		return false;
2640 
2641 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2642 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2643 				   CACHE_TIMEOUT);
2644 		return true;
2645 	}
2646 
2647 	return false;
2648 }
2649 
/* hci_cmd_sync callback for MGMT_OP_REMOVE_UUID: refresh the Class of
 * Device, then the EIR data.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int ret;

	ret = hci_update_class_sync(hdev);
	if (!ret)
		ret = hci_update_eir_sync(hdev);

	return ret;
}
2660 
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID (or all of them
 * when the all-zero UUID is given) and schedule a Class of Device and
 * EIR update.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID: wipe the whole list */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache took over, the class/EIR update is
		 * deferred to the cache flush work; reply immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2731 
set_class_sync(struct hci_dev * hdev,void * data)2732 static int set_class_sync(struct hci_dev *hdev, void *data)
2733 {
2734 	int err = 0;
2735 
2736 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2737 		cancel_delayed_work_sync(&hdev->service_cache);
2738 		err = hci_update_eir_sync(hdev);
2739 	}
2740 
2741 	if (err)
2742 		return err;
2743 
2744 	return hci_update_class_sync(hdev);
2745 }
2746 
/* MGMT_OP_SET_DEV_CLASS handler: set the major/minor Class of Device.
 *
 * The two low bits of minor and the three high bits of major are
 * reserved and must be zero.  When powered off only the stored values
 * are updated; the controller write is deferred to set_class_sync.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2801 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied list.
 *
 * Validates the overall message length against key_count, then clears
 * all existing keys and loads the new ones, skipping blocked keys,
 * non-BR/EDR address types, invalid key types, and debug combination
 * keys.  Always replies with success once validation passed.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key_count that cannot overflow the u16 message length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Skip keys on the administratively blocked list */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Link keys are only valid for BR/EDR addresses */
		if (key->addr.type != BDADDR_BREDR) {
			bt_dev_warn(hdev,
				    "Invalid link address type %u for %pMR",
				    key->addr.type, &key->addr.bdaddr);
			continue;
		}

		if (key->type > 0x08) {
			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
				    key->type, &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2894 
/* Emit the Device Unpaired mgmt event for the given address, skipping
 * the socket that initiated the unpair (it gets a command reply).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev = { .addr.type = addr_type };

	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2906 
unpair_device_complete(struct hci_dev * hdev,void * data,int err)2907 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2908 {
2909 	struct mgmt_pending_cmd *cmd = data;
2910 	struct mgmt_cp_unpair_device *cp = cmd->param;
2911 
2912 	if (!err)
2913 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2914 
2915 	cmd->cmd_complete(cmd, err);
2916 	mgmt_pending_free(cmd);
2917 }
2918 
unpair_device_sync(struct hci_dev * hdev,void * data)2919 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2920 {
2921 	struct mgmt_pending_cmd *cmd = data;
2922 	struct mgmt_cp_unpair_device *cp = cmd->param;
2923 	struct hci_conn *conn;
2924 
2925 	if (cp->addr.type == BDADDR_BREDR)
2926 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2927 					       &cp->addr.bdaddr);
2928 	else
2929 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2930 					       le_addr_type(cp->addr.type));
2931 
2932 	if (!conn)
2933 		return 0;
2934 
2935 	/* Disregard any possible error since the likes of hci_abort_conn_sync
2936 	 * will clean up the connection no matter the error.
2937 	 */
2938 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2939 
2940 	return 0;
2941 }
2942 
/* MGMT_OP_UNPAIR_DEVICE handler: remove all pairing data (link key for
 * BR/EDR, LTK/IRK for LE) for a device and optionally disconnect it.
 *
 * If disconnection is requested and a link exists, the reply is
 * deferred to unpair_device_complete via unpair_device_sync; otherwise
 * the command is completed immediately and Device Unpaired is emitted.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* No active link: the connection parameters can go now */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3071 
disconnect_complete(struct hci_dev * hdev,void * data,int err)3072 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3073 {
3074 	struct mgmt_pending_cmd *cmd = data;
3075 
3076 	cmd->cmd_complete(cmd, mgmt_status(err));
3077 	mgmt_pending_free(cmd);
3078 }
3079 
disconnect_sync(struct hci_dev * hdev,void * data)3080 static int disconnect_sync(struct hci_dev *hdev, void *data)
3081 {
3082 	struct mgmt_pending_cmd *cmd = data;
3083 	struct mgmt_cp_disconnect *cp = cmd->param;
3084 	struct hci_conn *conn;
3085 
3086 	if (cp->addr.type == BDADDR_BREDR)
3087 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3088 					       &cp->addr.bdaddr);
3089 	else
3090 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3091 					       le_addr_type(cp->addr.type));
3092 
3093 	if (!conn)
3094 		return -ENOTCONN;
3095 
3096 	/* Disregard any possible error since the likes of hci_abort_conn_sync
3097 	 * will clean up the connection no matter the error.
3098 	 */
3099 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3100 
3101 	return 0;
3102 }
3103 
/* MGMT_OP_DISCONNECT handler: queue termination of the connection to the
 * given address via hci_cmd_sync. On success the reply to user space is
 * sent from disconnect_complete(); only early errors are answered here.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the target address back. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* NOTE(review): mgmt_pending_new (not _add) is used here, so the
	 * command is presumably not visible to pending_find() and is owned
	 * solely by the sync callback pair — confirm against mgmt_util.c.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Free the command here only if queueing failed; otherwise
	 * disconnect_complete() frees it.
	 */
	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
				 disconnect_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3149 
/* Map an HCI link type / address type pair to a mgmt BDADDR_* type. */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	if (link_type == ISO_LINK || link_type == LE_LINK) {
		if (addr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;

		/* Fallback to LE Random address type */
		return BDADDR_LE_RANDOM;
	}

	/* Fallback to BR/EDR type */
	return BDADDR_BREDR;
}
3169 
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * connections that mgmt considers connected, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *conn;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: size the response buffer. */
	count = 0;
	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			count++;
	}

	rp = kmalloc(struct_size(rp, addr, count), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the entries. SCO/eSCO connections are
	 * filtered out, so the final count may be smaller.
	 */
	count = 0;
	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			continue;
		bacpy(&rp->addr[count].bdaddr, &conn->dst);
		rp->addr[count].type = link_to_bdaddr(conn->type,
						      conn->dst_type);
		if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
			continue;
		count++;
	}

	rp->conn_count = cpu_to_le16(count);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, count));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3223 
send_pin_code_neg_reply(struct sock * sk,struct hci_dev * hdev,struct mgmt_cp_pin_code_neg_reply * cp)3224 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3225 				   struct mgmt_cp_pin_code_neg_reply *cp)
3226 {
3227 	struct mgmt_pending_cmd *cmd;
3228 	int err;
3229 
3230 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3231 			       sizeof(*cp));
3232 	if (!cmd)
3233 		return -ENOMEM;
3234 
3235 	cmd->cmd_complete = addr_cmd_complete;
3236 
3237 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3238 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3239 	if (err < 0)
3240 		mgmt_pending_remove(cmd);
3241 
3242 	return err;
3243 }
3244 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user supplied PIN code for
 * an ongoing BR/EDR pairing to the controller.
 *
 * Returns 0 or a negative errno; error statuses are reported to the
 * socket on the paths handled here, success via the HCI event flow.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* A PIN reply only makes sense for an existing ACL connection. */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16 byte PIN: reject anything
	 * shorter by sending a negative reply to the controller and an
	 * Invalid Params status back to user space.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	/* The pending command is completed from the HCI event path; drop
	 * it here only if sending the HCI command failed.
	 */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3306 
/* MGMT_OP_SET_IO_CAPABILITY handler: record the IO capability to be used
 * for subsequent pairing attempts.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;
	u8 io_cap = cp->io_capability;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Values above KeyboardDisplay are not valid. */
	if (io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = io_cap;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
3329 
find_pairing(struct hci_conn * conn)3330 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3331 {
3332 	struct hci_dev *hdev = conn->hdev;
3333 	struct mgmt_pending_cmd *cmd;
3334 
3335 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3336 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3337 			continue;
3338 
3339 		if (cmd->user_data != conn)
3340 			continue;
3341 
3342 		return cmd;
3343 	}
3344 
3345 	return NULL;
3346 }
3347 
/* Finish a MGMT_OP_PAIR_DEVICE command: send the reply to user space,
 * detach the pairing callbacks and release the connection reference the
 * command held.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drops the reference taken via hci_conn_get() in pair_device(). */
	hci_conn_put(conn);

	return err;
}
3376 
/* SMP pairing finished for @conn: complete a pending PAIR_DEVICE command,
 * if any, with Success or Failed depending on @complete.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
3388 
/* BR/EDR connection callback: complete the pending PAIR_DEVICE command
 * with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	} else {
		BT_DBG("Unable to find a pending command");
	}
}
3404 
/* LE connection callback: only failures are acted upon here (status 0 is
 * ignored); the pending PAIR_DEVICE command is completed with the error.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	} else {
		BT_DBG("Unable to find a pending command");
	}
}
3423 
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing with a remote device,
 * creating a BR/EDR or LE connection as needed. The final reply is
 * normally deferred to pairing_complete() via the connection callbacks;
 * only early errors (and the already-secure shortcut) reply here.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the target address back. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error onto a mgmt status code. */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A connection with its confirm callback already set is in use;
	 * refuse to hijack it.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Hold a reference for the pending command; dropped again in
	 * pairing_complete().
	 */
	cmd->user_data = hci_conn_get(conn);

	/* If already connected and the requested security level is met,
	 * the pairing completes immediately.
	 */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3558 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: cancel the pending PAIR_DEVICE
 * command for the given address, remove any key material created so far
 * and tear down a link that exists only because of the pairing attempt.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same device the pending pairing is
	 * actually for.
	 */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Completes the PAIR_DEVICE command with Cancelled status; see
	 * pairing_complete() for the reference handling.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3615 
/* Common handler for the user pairing response commands (PIN code,
 * user confirm and passkey replies plus their negative variants).
 *
 * @mgmt_op: mgmt opcode being answered; used for all replies to @sk.
 * @hci_op:  HCI command to send for BR/EDR responses.
 * @passkey: passkey value; only used with HCI_OP_USER_PASSKEY_REPLY.
 *
 * LE responses are routed through SMP instead of HCI and complete
 * immediately.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled by SMP; no pending command is
	 * needed since the outcome is known right away.
	 */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3686 
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: reject a PIN code request. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* No passkey is involved in a negative PIN reply. */
	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3698 
/* MGMT_OP_USER_CONFIRM_REPLY handler: accept a user confirmation
 * request. The command is fixed size, hence the exact length check.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* A confirmation reply carries no passkey value. */
	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3714 
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: reject a user confirmation
 * request.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3726 
/* MGMT_OP_USER_PASSKEY_REPLY handler: answer a passkey request with the
 * user supplied value.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3738 
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: reject a passkey request. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* No passkey value accompanies a negative reply. */
	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3750 
/* If the currently active advertising instance has any of the given
 * flags set, cancel its timeout and schedule the next instance so the
 * stale data is replaced. Always returns 0.
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	/* Nothing to do if no instance is active or none of the flags
	 * apply to it.
	 */
	if (!adv || !(adv->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv = hci_get_next_instance(hdev, adv->instance);
	if (adv)
		hci_schedule_adv_instance_sync(hdev, adv->instance, true);

	return 0;
}
3773 
name_changed_sync(struct hci_dev * hdev,void * data)3774 static int name_changed_sync(struct hci_dev *hdev, void *data)
3775 {
3776 	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3777 }
3778 
/* Completion callback for set_name_sync: report the result of
 * MGMT_OP_SET_LOCAL_NAME and, if LE advertising is active, expire
 * advertising instances that carry the local name.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the request was cancelled or if this command is no
	 * longer the tracked pending one.
	 */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3804 
set_name_sync(struct hci_dev * hdev,void * data)3805 static int set_name_sync(struct hci_dev *hdev, void *data)
3806 {
3807 	if (lmp_bredr_capable(hdev)) {
3808 		hci_update_name_sync(hdev);
3809 		hci_update_eir_sync(hdev);
3810 	}
3811 
3812 	/* The name is stored in the scan response data and so
3813 	 * no need to update the advertising data here.
3814 	 */
3815 	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3816 		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3817 
3818 	return 0;
3819 }
3820 
/* MGMT_OP_SET_LOCAL_NAME handler: update the local device name and
 * short name. When powered, the controller update runs asynchronously
 * via set_name_sync(); otherwise only the stored values change.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* NOTE(review): the short name is committed unconditionally while
	 * dev_name is only committed after successful queueing below —
	 * presumably no controller command carries the short name; confirm.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		/* Not powered: just store the name and notify listeners. */
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Only commit the new name once the update was queued. */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3883 
appearance_changed_sync(struct hci_dev * hdev,void * data)3884 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3885 {
3886 	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3887 }
3888 
/* MGMT_OP_SET_APPEARANCE handler: update the appearance value used in
 * LE advertising.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance = le16_to_cpu(cp->appearance);
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (appearance != hdev->appearance) {
		hdev->appearance = appearance;

		/* Expire advertising instances that carry the appearance
		 * so they pick up the new value.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3923 
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report supported, selected and
 * configurable PHYs.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));

	/* Snapshot the PHY state under the device lock. */
	hci_dev_lock(hdev);
	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3944 
mgmt_phy_configuration_changed(struct hci_dev * hdev,struct sock * skip)3945 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3946 {
3947 	struct mgmt_ev_phy_configuration_changed ev;
3948 
3949 	memset(&ev, 0, sizeof(ev));
3950 
3951 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3952 
3953 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3954 			  sizeof(ev), skip);
3955 }
3956 
/* Completion callback for set_default_phy_sync: derive the final status
 * from the synchronous command result (or the response stored in
 * cmd->skb) and report it for MGMT_OP_SET_PHY_CONFIGURATION.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Skip if cancelled or if this is no longer the tracked pending
	 * command.
	 */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* When the sync call itself succeeded, the real status comes from
	 * the response skb (first byte holds the HCI status).
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Let other mgmt sockets know about the new selection. */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3994 
set_default_phy_sync(struct hci_dev * hdev,void * data)3995 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3996 {
3997 	struct mgmt_pending_cmd *cmd = data;
3998 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3999 	struct hci_cp_le_set_default_phy cp_phy;
4000 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4001 
4002 	memset(&cp_phy, 0, sizeof(cp_phy));
4003 
4004 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4005 		cp_phy.all_phys |= 0x01;
4006 
4007 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4008 		cp_phy.all_phys |= 0x02;
4009 
4010 	if (selected_phys & MGMT_PHY_LE_1M_TX)
4011 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4012 
4013 	if (selected_phys & MGMT_PHY_LE_2M_TX)
4014 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4015 
4016 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
4017 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4018 
4019 	if (selected_phys & MGMT_PHY_LE_1M_RX)
4020 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4021 
4022 	if (selected_phys & MGMT_PHY_LE_2M_RX)
4023 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4024 
4025 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
4026 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4027 
4028 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4029 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4030 
4031 	return 0;
4032 }
4033 
/* MGMT_OP_SET_PHY_CONFIGURATION handler.
 *
 * Validates the requested PHY selection against what the controller
 * supports, applies the BR/EDR part immediately by recomputing
 * hdev->pkt_type, and programs the LE part via a queued
 * set_default_phy_sync / set_default_phy_complete pair.
 *
 * Failure statuses sent back to userspace:
 *   INVALID_PARAMS - unsupported PHY selected, or a non-configurable
 *                    PHY deselected
 *   REJECTED       - controller not powered
 *   BUSY           - another Set PHY Configuration is still pending
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller does not support is invalid */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* PHYs that cannot be configured must always remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do if the selection is already in effect */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY selection onto ACL packet types. Note the
	 * inverted sense for EDR: the HCI_2DHx/HCI_3DHx bits disable
	 * the corresponding packet types when set.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE selection is unchanged, only the BR/EDR part needs
	 * handling and no HCI command has to be issued.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4162 
/* MGMT_OP_SET_BLOCKED_KEYS handler.
 *
 * Replaces the controller's list of blocked (distrusted) keys with the
 * list supplied by userspace. Replies with MGMT_STATUS_SUCCESS,
 * MGMT_STATUS_INVALID_PARAMS for a malformed request, or
 * MGMT_STATUS_NO_RESOURCES when allocation fails.
 *
 * Fix: on allocation failure the loop used to bail out leaving the
 * keys added so far on hdev->blocked_keys, i.e. a failed command left
 * a partially-populated list behind. Clear the list before reporting
 * the failure so the device state stays consistent.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Largest key_count for which the total parameter length still
	 * fits in the u16 length field.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the advertised key count exactly */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			/* Don't leave a partially-populated list behind
			 * when reporting failure to userspace.
			 */
			hci_blocked_keys_clear(hdev);
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
4211 
/* MGMT_OP_SET_WIDEBAND_SPEECH handler.
 *
 * Toggles the HCI_WIDEBAND_SPEECH_ENABLED setting. Requires the driver
 * to declare HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED; while the controller
 * is powered, a request that would change the current value is
 * rejected.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean on/off is supported */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Reject changing the value while powered */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Broadcast New Settings only if the flag actually flipped */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4260 
/* MGMT_OP_READ_CONTROLLER_CAP handler.
 *
 * Builds an EIR-encoded list of controller capabilities: a security
 * flags octet, the maximum encryption key sizes and, when the
 * controller can report it, the min/max LE TX power.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* 20 bytes covers the largest reply assembled below (header plus
	 * one 1-byte and up to three 2-byte EIR entries) - confirm if
	 * more capability entries are ever added.
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4327 
/* UUIDs identifying the experimental features. The byte arrays are
 * stored in reverse (little-endian) order relative to the UUID string
 * quoted above each one.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4371 
/* MGMT_OP_READ_EXP_FEATURES_INFO handler (hdev is NULL when addressed
 * via the non-controller index).
 *
 * Reports each available experimental feature as a 20-byte entry
 * (16-byte UUID + 32-bit flags; flag bit 0 = currently enabled) and
 * subscribes the socket to future exp-feature-changed events.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature is only reported on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* Bit 1 flags the feature as changing supported settings */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Quality report via either a driver hook or the AOSP extension */
	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* 20 == sizeof(rp->features[0]): 16-byte UUID + le32 flags */
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}
4476 
/* Notify all sockets subscribed to experimental feature events (except
 * @skip) that the LL privacy feature changed state, and mirror that
 * state into hdev->conn_flags: HCI_CONN_FLAG_DEVICE_PRIVACY is only
 * offered while the feature is enabled on a privacy-mode capable
 * controller. Event flags: bit 0 = enabled, bit 1 = supported
 * settings changed.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	/* NOTE(review): do we need to be atomic with the conn_flags? */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);

}
4497 
/* Broadcast MGMT_EV_EXP_FEATURE_CHANGED for @uuid to every socket that
 * enabled HCI_MGMT_EXP_FEATURE_EVENTS, skipping @skip. Flag bit 0
 * carries the new enabled state.
 */
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev = {
		.flags = cpu_to_le32(enabled ? BIT(0) : 0),
	};

	memcpy(ev.uuid, uuid, 16);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
4511 
/* Build one entry of the exp_features dispatch table */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4517 
/* The zero key uuid is special. Multiple exp features are set through it. */
/* Writing the all-zero UUID turns the features it controls off: the
 * debug feature (when addressed without a controller index) and LL
 * privacy (when the controller is powered off), emitting a change
 * event for each one that actually flipped. Always replies with
 * success and zeroed flags.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	/* Subscribe the socket to future experimental feature events */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4554 
4555 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Set the debug experimental feature (bt_dbg) - only valid on the
 * non-controller index. Validates the index, the parameter length and
 * the boolean value, flips the global debug state, and notifies
 * subscribed sockets if it changed.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here, so the event carries no controller index */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
4600 #endif
4601 
/* Enable/disable the experimental mesh feature flag
 * (HCI_MESH_EXPERIMENTAL) on a controller. Disabling also clears the
 * active HCI_MESH flag. Notifies subscribed sockets when the flag
 * actually changed.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		/* Turning the experimental feature off also disables
		 * active mesh operation.
		 */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}
4652 
/* Enable/disable the LL privacy (RPA resolution) experimental feature.
 * Only allowed while the controller is powered off; enabling also
 * clears HCI_ADVERTISING. Reply/event flags: bit 0 = enabled,
 * bit 1 = supported settings changed.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4717 
/* Enable/disable the quality report experimental feature. Uses the
 * driver hook (hdev->set_quality_report) when present, otherwise the
 * AOSP vendor extension. The actual toggle is serialized via
 * hci_req_sync_lock.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	/* Neither a driver hook nor AOSP support -> not supported */
	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only track the flag once the controller accepted it */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4791 
/* Enable/disable use of offloaded codecs
 * (HCI_OFFLOAD_CODECS_ENABLED). Requires the driver to provide the
 * get_data_path_id hook; notifies subscribed sockets when the flag
 * actually changed.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	/* Offload codecs depend on the driver's data path hook */
	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4849 
/* Enable/disable acting as LE central and peripheral at the same time
 * (HCI_LE_SIMULTANEOUS_ROLES). Requires controller support as reported
 * by hci_dev_le_state_simultaneous(); notifies subscribed sockets when
 * the flag actually changed.
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4907 
4908 #ifdef CONFIG_BT_LE
/* Enable/disable ISO socket support - only valid on the non-controller
 * index. Registers or unregisters the ISO socket protocol via
 * iso_init()/iso_exit() and notifies subscribed sockets on success.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = cp->param[0] ? true : false;
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	/* Only report a change if (un)registration succeeded */
	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here (checked above), so the event carries no
	 * controller index.
	 */
	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
4957 #endif
4958 
/* Dispatch table mapping experimental-feature UUIDs to their setter.
 * Scanned linearly by set_exp_feature() and terminated by the entry
 * with a NULL uuid.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4980 
/* MGMT_OP_SET_EXP_FEATURE handler: dispatch the request to the setter
 * whose UUID matches, or report NOT_SUPPORTED when no experimental
 * feature with that UUID exists.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	const struct mgmt_exp_feature *feature;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (feature = exp_features; feature->uuid; feature++) {
		if (!memcmp(cp->uuid, feature->uuid, 16))
			return feature->set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4998 
get_params_flags(struct hci_dev * hdev,struct hci_conn_params * params)4999 static u32 get_params_flags(struct hci_dev *hdev,
5000 			    struct hci_conn_params *params)
5001 {
5002 	u32 flags = hdev->conn_flags;
5003 
5004 	/* Devices using RPAs can only be programmed in the acceptlist if
5005 	 * LL Privacy has been enable otherwise they cannot mark
5006 	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5007 	 */
5008 	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5009 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5010 		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5011 
5012 	return flags;
5013 }
5014 
/* MGMT_OP_GET_DEVICE_FLAGS: report the supported and current connection
 * flags for a single device.  BR/EDR devices are looked up on the accept
 * list; LE devices via their connection parameters, where the supported
 * flags may additionally be restricted (see get_params_flags()).
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		/* Unknown device: reply with INVALID_PARAMS and zeroed rp. */
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		/* LE supported flags may be narrower than hdev->conn_flags */
		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
5066 
/* Emit MGMT_EV_DEVICE_FLAGS_CHANGED; @sk is handed to mgmt_event() as
 * the originating socket (per mgmt_util convention).
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev = {
		.addr.type = bdaddr_type,
		.supported_flags = cpu_to_le32(supported_flags),
		.current_flags = cpu_to_le32(current_flags),
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
5080 
/* MGMT_OP_SET_DEVICE_FLAGS: update the connection flags of one device.
 * Rejects flags outside the supported set, stores the new value on the
 * accept-list entry (BR/EDR) or connection parameters (LE), and emits
 * DEVICE_FLAGS_CHANGED on success.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): conn_flags is read before hci_dev_lock() is taken;
	 * if it can change concurrently this pre-check may race with an
	 * update — confirm whether the read should move under the lock.
	 */
	supported_flags = hdev->conn_flags;

	/* Reject any requested flag that is not in the supported set. */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* The per-device supported set can be narrower for LE (RPA +
	 * no LL Privacy), so re-validate against it.
	 */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5157 
/* Emit MGMT_EV_ADV_MONITOR_ADDED for a newly registered monitor. */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
5167 
/* Emit MGMT_EV_ADV_MONITOR_REMOVED.  When a Remove Advertising Monitor
 * command for a specific handle is pending, its issuing socket is
 * selected as the one to skip, since it gets a command-complete reply.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_cp_remove_adv_monitor *cp;
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5187 
/* MGMT_OP_READ_ADV_MONITOR_FEATURES: report the supported/enabled
 * monitor feature masks plus the handles of all registered monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Snapshot all registered monitor handles while holding the lock.
	 * NOTE(review): assumes the IDR never holds more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES entries — confirm registration
	 * enforces this bound, otherwise handles[] could overflow.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	/* The reply carries a variable-length trailing handle list. */
	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5236 
/* hci_cmd_sync completion for Add Adv Patterns Monitor: on success the
 * monitor becomes registered and passive scanning is re-evaluated; the
 * result is reported back to the issuing socket either way.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		/* Announce the new monitor to the other mgmt sockets. */
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5264 
mgmt_add_adv_patterns_monitor_sync(struct hci_dev * hdev,void * data)5265 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5266 {
5267 	struct mgmt_pending_cmd *cmd = data;
5268 	struct adv_monitor *monitor = cmd->user_data;
5269 
5270 	return hci_add_adv_monitor(hdev, monitor);
5271 }
5272 
/* Common tail of MGMT_OP_ADD_ADV_PATTERNS_MONITOR{,_RSSI}: queue the
 * monitor registration on the cmd_sync machinery.  On every failure path
 * the monitor @m is freed here; on success ownership passes to the
 * pending command until the completion callback runs.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	/* The caller already failed while building the monitor. */
	if (status)
		goto unlock;

	/* Only one monitor/LE state change may be in flight at a time. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	/* NOTE(review): on queue failure the pending cmd added above is not
	 * removed here — verify it is cleaned up elsewhere (its user_data is
	 * freed below either way).
	 */
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	/* NOTE(review): callers can reach here with m == NULL (allocation
	 * failure) — presumably hci_free_adv_monitor() tolerates that;
	 * confirm.
	 */
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5320 
parse_adv_monitor_rssi(struct adv_monitor * m,struct mgmt_adv_rssi_thresholds * rssi)5321 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5322 				   struct mgmt_adv_rssi_thresholds *rssi)
5323 {
5324 	if (rssi) {
5325 		m->rssi.low_threshold = rssi->low_threshold;
5326 		m->rssi.low_threshold_timeout =
5327 		    __le16_to_cpu(rssi->low_threshold_timeout);
5328 		m->rssi.high_threshold = rssi->high_threshold;
5329 		m->rssi.high_threshold_timeout =
5330 		    __le16_to_cpu(rssi->high_threshold_timeout);
5331 		m->rssi.sampling_period = rssi->sampling_period;
5332 	} else {
5333 		/* Default values. These numbers are the least constricting
5334 		 * parameters for MSFT API to work, so it behaves as if there
5335 		 * are no rssi parameter to consider. May need to be changed
5336 		 * if other API are to be supported.
5337 		 */
5338 		m->rssi.low_threshold = -127;
5339 		m->rssi.low_threshold_timeout = 60;
5340 		m->rssi.high_threshold = -127;
5341 		m->rssi.high_threshold_timeout = 0;
5342 		m->rssi.sampling_period = 0;
5343 	}
5344 }
5345 
/* Validate and copy @pattern_count patterns onto the monitor's pattern
 * list.  On failure, already-added patterns remain on the list; they are
 * released when the caller frees the monitor.
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	int i;

	for (i = 0; i < pattern_count; i++) {
		struct mgmt_adv_pattern *src = &patterns[i];
		struct adv_pattern *p;

		/* Reject patterns that would index past the maximum
		 * extended advertising data length.
		 */
		if (src->offset >= HCI_MAX_EXT_AD_LENGTH ||
		    src->length > HCI_MAX_EXT_AD_LENGTH ||
		    (src->offset + src->length) > HCI_MAX_EXT_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = src->ad_type;
		p->offset = src->offset;
		p->length = src->length;
		memcpy(p->value, src->value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
5376 
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR: create a pattern-based advertisement
 * monitor with default RSSI parameters and hand it off for registration.
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed-size header. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* The payload length must match the declared pattern count. */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* NULL rssi selects the least-constricting default thresholds. */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	/* __add_adv_patterns_monitor() takes ownership of m (may be NULL
	 * here on early failure) and frees it on any error path.
	 */
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5413 
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI: like add_adv_patterns_monitor()
 * but with caller-supplied RSSI thresholds.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed-size header. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* The payload length must match the declared pattern count. */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	/* __add_adv_patterns_monitor() takes ownership of m (may be NULL
	 * here on early failure) and frees it on any error path.
	 */
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5450 
/* hci_cmd_sync completion for Remove Adv Monitor: report the result and
 * re-evaluate passive scanning when monitors were actually removed.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Echo the requested handle back (already little endian). */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5473 
mgmt_remove_adv_monitor_sync(struct hci_dev * hdev,void * data)5474 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5475 {
5476 	struct mgmt_pending_cmd *cmd = data;
5477 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5478 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5479 
5480 	if (!handle)
5481 		return hci_remove_all_adv_monitor(hdev);
5482 
5483 	return hci_remove_single_adv_monitor(hdev, handle);
5484 }
5485 
/* MGMT_OP_REMOVE_ADV_MONITOR: remove one monitor (or all, for handle 0)
 * via the cmd_sync machinery; rejected with Busy while any conflicting
 * monitor/LE operation is still pending.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	/* Note: _submit is used here, unlike the _queue used when adding
	 * monitors.
	 */
	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5531 
/* Completion for Read Local OOB Data: translate the controller reply
 * (legacy P-192 only, or extended P-192 + P-256 when BR/EDR Secure
 * Connections is enabled) into the mgmt reply format.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* cmd->skb may be NULL, an ERR_PTR, or a real reply whose first
	 * byte is the HCI status code.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: P-192 hash/rand only; the unused P-256
		 * fields are trimmed off the mgmt reply below.
		 */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply carries both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5598 
read_local_oob_data_sync(struct hci_dev * hdev,void * data)5599 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5600 {
5601 	struct mgmt_pending_cmd *cmd = data;
5602 
5603 	if (bredr_sc_enabled(hdev))
5604 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5605 	else
5606 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5607 
5608 	if (IS_ERR(cmd->skb))
5609 		return PTR_ERR(cmd->skb);
5610 	else
5611 		return 0;
5612 }
5613 
/* MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for local OOB pairing
 * data.  Requires the adapter to be powered and SSP-capable; the actual
 * HCI exchange happens asynchronously via the cmd_sync machinery.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* mgmt_pending_new (not _add): the cmd is freed by the completion
	 * callback with mgmt_pending_free().
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5655 
/* MGMT_OP_ADD_REMOTE_OOB_DATA: store OOB pairing data received from a
 * remote device.  Two request sizes are accepted: the legacy form with
 * P-192 values only (BR/EDR addresses only) and the extended form with
 * both P-192 and P-256 values.  All-zero hash/rand pairs disable the
 * corresponding key strength.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy request: P-192 values only, BR/EDR only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended request: P-192 and P-256 values. */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5763 
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA: drop stored remote OOB data for one
 * BR/EDR device, or for all devices when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	int err;
	u8 status;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Wildcard address clears all stored OOB data. */
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
	} else if (hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type) < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
	} else {
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
5800 
/* Complete whichever Start Discovery variant is pending, if any. */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	static const u16 ops[] = {
		MGMT_OP_START_DISCOVERY,
		MGMT_OP_START_SERVICE_DISCOVERY,
		MGMT_OP_START_LIMITED_DISCOVERY,
	};
	struct mgmt_pending_cmd *cmd = NULL;
	size_t i;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	for (i = 0; i < ARRAY_SIZE(ops) && !cmd; i++)
		cmd = pending_find(ops[i], hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
5823 
/* Validate a discovery type against the adapter's capabilities.  On
 * failure, *mgmt_status holds the mgmt error code to return.  Note the
 * intentional fallthrough: INTERLEAVED requires both LE and BR/EDR.
 */
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
				    uint8_t *mgmt_status)
{
	switch (type) {
	case DISCOV_TYPE_LE:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	case DISCOV_TYPE_INTERLEAVED:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		/* Interleaved discovery also needs BR/EDR support. */
		fallthrough;
	case DISCOV_TYPE_BREDR:
		*mgmt_status = mgmt_bredr_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	default:
		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
5850 
/* hci_cmd_sync completion shared by all Start Discovery variants. */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* A cancelled request is completed by the cancellation path. */
	if (err == -ECANCELED)
		return;

	/* Bail out if the pending command was already taken down. */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	/* The reply carries the discovery type: first byte of the request. */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5872 
/* hci_cmd_sync worker: kick off discovery on the controller. */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5877 
/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: validate state and discovery type,
 * then queue the discovery start on the cmd_sync machinery.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery session at a time. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* STARTING until the queued work confirms (or fails) the start. */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5948 
/* MGMT_OP_START_DISCOVERY handler: general (unlimited) discovery. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5955 
/* MGMT_OP_START_LIMITED_DISCOVERY handler: limited discovery mode. */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5963 
/* MGMT_OP_START_SERVICE_DISCOVERY handler: start discovery with result
 * filtering by RSSI threshold and an optional list of service UUIDs.
 * The variable-length UUID list (16 bytes per UUID) follows the fixed
 * part of the command parameters.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Largest uuid_count for which the whole command still fits in a
	 * 16-bit length, so the expected_len computation cannot wrap.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery operation may be active at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Discovery is temporarily suspended (e.g. during system suspend) */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The supplied length must exactly match the advertised UUID count */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Copy the UUID list; freed later by
		 * hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* Command completes asynchronously via start_discovery_complete */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6075 
/* Notify a pending MGMT_OP_STOP_DISCOVERY command that the HCI side of
 * stopping discovery has finished with the given status.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6092 
/* Completion callback for the stop_discovery_sync() work queued via
 * hci_cmd_sync_queue().
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the request was cancelled or if this pending command
	 * has already been completed/removed by someone else.
	 */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Respond with the single type byte at the start of the original
	 * command parameters.
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6110 
/* hci_cmd_sync work: tear down any ongoing discovery on the controller. */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6115 
/* MGMT_OP_STOP_DISCOVERY handler: stop an active discovery session of
 * the matching type and queue the HCI work to stop inquiry/LE scanning.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type must match the one discovery was started with */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	/* Command completes asynchronously via stop_discovery_complete() */
	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6160 
/* MGMT_OP_CONFIRM_NAME handler: user space reports whether a discovered
 * device's name is already known, so the discovery flow can decide if a
 * remote name request is still needed for it.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *entry;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	entry = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!entry) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	if (!cp->name_known) {
		/* Name still unknown: keep the entry queued for resolving */
		entry->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, entry);
	} else {
		entry->name_state = NAME_KNOWN;
		list_del(&entry->list);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6202 
/* MGMT_OP_BLOCK_DEVICE handler: add an address to the reject list and
 * broadcast a Device Blocked event on success.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0)
		status = MGMT_STATUS_FAILED;
	else
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6238 
/* MGMT_OP_UNBLOCK_DEVICE handler: remove an address from the reject list
 * and broadcast a Device Unblocked event on success. A miss in the list
 * is reported as invalid parameters.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6274 
/* hci_cmd_sync work: refresh the EIR data so the updated Device ID
 * record is exposed.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6279 
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record (source,
 * vendor, product, version) and queue an EIR update to publish it.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	__u16 source;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	/* Only source values 0x0000-0x0002 are defined */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
6311 
/* Log the outcome of re-enabling instance advertising. */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err)
		bt_dev_dbg(hdev, "status %d", err);
	else
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6319 
/* Completion callback for the set_adv_sync() work queued via
 * hci_cmd_sync_queue().
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command with the error */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's actual LE advertising state into the
	 * HCI_ADVERTISING setting flag.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6367 
/* hci_cmd_sync work for MGMT_OP_SET_ADVERTISING: apply the requested
 * mode (0x00 = off, 0x01 = on, 0x02 = on and connectable).
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	/* Stop any pending instance-advertising timeout before switching */
	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			/* Legacy advertising: data must be set before enable */
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6401 
/* MGMT_OP_SET_ADVERTISING handler: turn LE advertising off (0x00),
 * on (0x01), or on and connectable (0x02).
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising is suspended (e.g. during system suspend) */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if the flags actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject while another advertising or LE toggle is in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6486 
/* MGMT_OP_SET_STATIC_ADDRESS handler. Only allowed on LE-capable,
 * powered-off controllers. BDADDR_ANY clears the static address; any
 * other value must be a valid static random address and must not be
 * BDADDR_NONE.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Static random addresses require the two most significant
		 * bits to be set.
		 */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err >= 0)
		err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
	return err;
}
6530 
/* MGMT_OP_SET_SCAN_PARAMS handler: update the LE scan interval and
 * window used for passive background scanning.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);
	window = __le16_to_cpu(cp->window);

	/* Both values are limited to 0x0004-0x4000, and the scan window
	 * may never exceed the scan interval.
	 */
	if (interval < 0x0004 || interval > 0x4000 ||
	    window < 0x0004 || window > 0x4000 ||
	    window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6579 
fast_connectable_complete(struct hci_dev * hdev,void * data,int err)6580 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6581 {
6582 	struct mgmt_pending_cmd *cmd = data;
6583 
6584 	bt_dev_dbg(hdev, "err %d", err);
6585 
6586 	if (err) {
6587 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6588 				mgmt_status(err));
6589 	} else {
6590 		struct mgmt_mode *cp = cmd->param;
6591 
6592 		if (cp->val)
6593 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6594 		else
6595 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6596 
6597 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6598 		new_settings(hdev, cmd->sk);
6599 	}
6600 
6601 	mgmt_pending_free(cmd);
6602 }
6603 
write_fast_connectable_sync(struct hci_dev * hdev,void * data)6604 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6605 {
6606 	struct mgmt_pending_cmd *cmd = data;
6607 	struct mgmt_mode *cp = cmd->param;
6608 
6609 	return hci_write_fast_connectable_sync(hdev, cp->val);
6610 }
6611 
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle fast connectable mode
 * (shorter page scan interval) on a BR/EDR controller.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires BR/EDR and at least Bluetooth 1.2 (interlaced scan) */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just echo the current settings back */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: only toggle the flag, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	/* mgmt_pending_new (not _add): the command is not registered in the
	 * pending list; its lifetime is handled solely by the completion
	 * callback, which frees it.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6667 
set_bredr_complete(struct hci_dev * hdev,void * data,int err)6668 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6669 {
6670 	struct mgmt_pending_cmd *cmd = data;
6671 
6672 	bt_dev_dbg(hdev, "err %d", err);
6673 
6674 	if (err) {
6675 		u8 mgmt_err = mgmt_status(err);
6676 
6677 		/* We need to restore the flag if related HCI commands
6678 		 * failed.
6679 		 */
6680 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6681 
6682 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6683 	} else {
6684 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6685 		new_settings(hdev, cmd->sk);
6686 	}
6687 
6688 	mgmt_pending_free(cmd);
6689 }
6690 
set_bredr_sync(struct hci_dev * hdev,void * data)6691 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6692 {
6693 	int status;
6694 
6695 	status = hci_write_fast_connectable_sync(hdev, false);
6696 
6697 	if (!status)
6698 		status = hci_update_scan_sync(hdev);
6699 
6700 	/* Since only the advertising data flags will change, there
6701 	 * is no need to update the scan response data.
6702 	 */
6703 	if (!status)
6704 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6705 
6706 	return status;
6707 }
6708 
/* MGMT_OP_SET_BREDR handler: enable or disable BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Toggling BR/EDR only makes sense while LE is enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just echo the current settings back */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* Powered off: toggle flags only, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			/* Disabling BR/EDR clears all BR/EDR-only settings */
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	/* Not registered in the pending list; freed by the completion
	 * callback set_bredr_complete().
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6808 
set_secure_conn_complete(struct hci_dev * hdev,void * data,int err)6809 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6810 {
6811 	struct mgmt_pending_cmd *cmd = data;
6812 	struct mgmt_mode *cp;
6813 
6814 	bt_dev_dbg(hdev, "err %d", err);
6815 
6816 	if (err) {
6817 		u8 mgmt_err = mgmt_status(err);
6818 
6819 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6820 		goto done;
6821 	}
6822 
6823 	cp = cmd->param;
6824 
6825 	switch (cp->val) {
6826 	case 0x00:
6827 		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6828 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6829 		break;
6830 	case 0x01:
6831 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6832 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6833 		break;
6834 	case 0x02:
6835 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6836 		hci_dev_set_flag(hdev, HCI_SC_ONLY);
6837 		break;
6838 	}
6839 
6840 	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6841 	new_settings(hdev, cmd->sk);
6842 
6843 done:
6844 	mgmt_pending_free(cmd);
6845 }
6846 
/* hci_cmd_sync work for MGMT_OP_SET_SECURE_CONN. */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val: presumably setting HCI_SC_ENABLED up front
	 * makes hci_write_sc_support_sync() issue the command even if the
	 * current flag state would otherwise suppress it — TODO confirm
	 * against hci_sync.c. The final flag state is established in
	 * set_secure_conn_complete().
	 */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}
6858 
/* MGMT_OP_SET_SECURE_CONN handler: configure Secure Connections support
 * (0x00 = off, 0x01 = enabled, 0x02 = SC-only mode).
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC requires controller support or at least LE enabled (for LE,
	 * SC is a host stack feature).
	 */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On BR/EDR, SC builds on top of SSP, so SSP must be enabled */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* LE-only or powered-off: just toggle the flags, no HCI traffic */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change requested: just echo the current settings back */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	/* Not registered in the pending list; freed by the completion
	 * callback set_secure_conn_complete().
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6939 
/* MGMT_OP_SET_DEBUG_KEYS handler: 0x00 = discard debug keys, 0x01 = keep
 * debug keys, 0x02 = keep debug keys and also generate them (SSP debug
 * mode on the controller).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* HCI_KEEP_DEBUG_KEYS: whether stored debug link keys survive */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* HCI_USE_DEBUG_KEYS: whether the controller generates debug keys */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Push the new SSP debug mode to a powered, SSP-enabled controller */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6986 
/* MGMT_OP_SET_PRIVACY handler: configure LE privacy (0x00 = off,
 * 0x01 = privacy with resolvable private addresses, 0x02 = limited
 * privacy) together with the local Identity Resolving Key. Only allowed
 * while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		/* Adopt the supplied IRK and force generation of a fresh RPA */
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Privacy off: wipe the IRK and all privacy-related state */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7043 
irk_is_valid(struct mgmt_irk_info * irk)7044 static bool irk_is_valid(struct mgmt_irk_info *irk)
7045 {
7046 	switch (irk->addr.type) {
7047 	case BDADDR_LE_PUBLIC:
7048 		return true;
7049 
7050 	case BDADDR_LE_RANDOM:
7051 		/* Two most significant bits shall be set */
7052 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7053 			return false;
7054 		return true;
7055 	}
7056 
7057 	return false;
7058 }
7059 
load_irks(struct sock * sk,struct hci_dev * hdev,void * cp_data,u16 len)7060 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7061 		     u16 len)
7062 {
7063 	struct mgmt_cp_load_irks *cp = cp_data;
7064 	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7065 				   sizeof(struct mgmt_irk_info));
7066 	u16 irk_count, expected_len;
7067 	int i, err;
7068 
7069 	bt_dev_dbg(hdev, "sock %p", sk);
7070 
7071 	if (!lmp_le_capable(hdev))
7072 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7073 				       MGMT_STATUS_NOT_SUPPORTED);
7074 
7075 	irk_count = __le16_to_cpu(cp->irk_count);
7076 	if (irk_count > max_irk_count) {
7077 		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7078 			   irk_count);
7079 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7080 				       MGMT_STATUS_INVALID_PARAMS);
7081 	}
7082 
7083 	expected_len = struct_size(cp, irks, irk_count);
7084 	if (expected_len != len) {
7085 		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7086 			   expected_len, len);
7087 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7088 				       MGMT_STATUS_INVALID_PARAMS);
7089 	}
7090 
7091 	bt_dev_dbg(hdev, "irk_count %u", irk_count);
7092 
7093 	for (i = 0; i < irk_count; i++) {
7094 		struct mgmt_irk_info *key = &cp->irks[i];
7095 
7096 		if (!irk_is_valid(key))
7097 			return mgmt_cmd_status(sk, hdev->id,
7098 					       MGMT_OP_LOAD_IRKS,
7099 					       MGMT_STATUS_INVALID_PARAMS);
7100 	}
7101 
7102 	hci_dev_lock(hdev);
7103 
7104 	hci_smp_irks_clear(hdev);
7105 
7106 	for (i = 0; i < irk_count; i++) {
7107 		struct mgmt_irk_info *irk = &cp->irks[i];
7108 
7109 		if (hci_is_blocked_key(hdev,
7110 				       HCI_BLOCKED_KEY_TYPE_IRK,
7111 				       irk->val)) {
7112 			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7113 				    &irk->addr.bdaddr);
7114 			continue;
7115 		}
7116 
7117 		hci_add_irk(hdev, &irk->addr.bdaddr,
7118 			    le_addr_type(irk->addr.type), irk->val,
7119 			    BDADDR_ANY);
7120 	}
7121 
7122 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7123 
7124 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7125 
7126 	hci_dev_unlock(hdev);
7127 
7128 	return err;
7129 }
7130 
ltk_is_valid(struct mgmt_ltk_info * key)7131 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7132 {
7133 	if (key->initiator != 0x00 && key->initiator != 0x01)
7134 		return false;
7135 
7136 	switch (key->addr.type) {
7137 	case BDADDR_LE_PUBLIC:
7138 		return true;
7139 
7140 	case BDADDR_LE_RANDOM:
7141 		/* Two most significant bits shall be set */
7142 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7143 			return false;
7144 		return true;
7145 	}
7146 
7147 	return false;
7148 }
7149 
load_long_term_keys(struct sock * sk,struct hci_dev * hdev,void * cp_data,u16 len)7150 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7151 			       void *cp_data, u16 len)
7152 {
7153 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
7154 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7155 				   sizeof(struct mgmt_ltk_info));
7156 	u16 key_count, expected_len;
7157 	int i, err;
7158 
7159 	bt_dev_dbg(hdev, "sock %p", sk);
7160 
7161 	if (!lmp_le_capable(hdev))
7162 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7163 				       MGMT_STATUS_NOT_SUPPORTED);
7164 
7165 	key_count = __le16_to_cpu(cp->key_count);
7166 	if (key_count > max_key_count) {
7167 		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7168 			   key_count);
7169 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7170 				       MGMT_STATUS_INVALID_PARAMS);
7171 	}
7172 
7173 	expected_len = struct_size(cp, keys, key_count);
7174 	if (expected_len != len) {
7175 		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7176 			   expected_len, len);
7177 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7178 				       MGMT_STATUS_INVALID_PARAMS);
7179 	}
7180 
7181 	bt_dev_dbg(hdev, "key_count %u", key_count);
7182 
7183 	hci_dev_lock(hdev);
7184 
7185 	hci_smp_ltks_clear(hdev);
7186 
7187 	for (i = 0; i < key_count; i++) {
7188 		struct mgmt_ltk_info *key = &cp->keys[i];
7189 		u8 type, authenticated;
7190 
7191 		if (hci_is_blocked_key(hdev,
7192 				       HCI_BLOCKED_KEY_TYPE_LTK,
7193 				       key->val)) {
7194 			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7195 				    &key->addr.bdaddr);
7196 			continue;
7197 		}
7198 
7199 		if (!ltk_is_valid(key)) {
7200 			bt_dev_warn(hdev, "Invalid LTK for %pMR",
7201 				    &key->addr.bdaddr);
7202 			continue;
7203 		}
7204 
7205 		switch (key->type) {
7206 		case MGMT_LTK_UNAUTHENTICATED:
7207 			authenticated = 0x00;
7208 			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7209 			break;
7210 		case MGMT_LTK_AUTHENTICATED:
7211 			authenticated = 0x01;
7212 			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7213 			break;
7214 		case MGMT_LTK_P256_UNAUTH:
7215 			authenticated = 0x00;
7216 			type = SMP_LTK_P256;
7217 			break;
7218 		case MGMT_LTK_P256_AUTH:
7219 			authenticated = 0x01;
7220 			type = SMP_LTK_P256;
7221 			break;
7222 		case MGMT_LTK_P256_DEBUG:
7223 			authenticated = 0x00;
7224 			type = SMP_LTK_P256_DEBUG;
7225 			fallthrough;
7226 		default:
7227 			continue;
7228 		}
7229 
7230 		hci_add_ltk(hdev, &key->addr.bdaddr,
7231 			    le_addr_type(key->addr.type), type, authenticated,
7232 			    key->val, key->enc_size, key->ediv, key->rand);
7233 	}
7234 
7235 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7236 			   NULL, 0);
7237 
7238 	hci_dev_unlock(hdev);
7239 
7240 	return err;
7241 }
7242 
get_conn_info_complete(struct hci_dev * hdev,void * data,int err)7243 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7244 {
7245 	struct mgmt_pending_cmd *cmd = data;
7246 	struct hci_conn *conn = cmd->user_data;
7247 	struct mgmt_cp_get_conn_info *cp = cmd->param;
7248 	struct mgmt_rp_get_conn_info rp;
7249 	u8 status;
7250 
7251 	bt_dev_dbg(hdev, "err %d", err);
7252 
7253 	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7254 
7255 	status = mgmt_status(err);
7256 	if (status == MGMT_STATUS_SUCCESS) {
7257 		rp.rssi = conn->rssi;
7258 		rp.tx_power = conn->tx_power;
7259 		rp.max_tx_power = conn->max_tx_power;
7260 	} else {
7261 		rp.rssi = HCI_RSSI_INVALID;
7262 		rp.tx_power = HCI_TX_POWER_INVALID;
7263 		rp.max_tx_power = HCI_TX_POWER_INVALID;
7264 	}
7265 
7266 	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7267 			  &rp, sizeof(rp));
7268 
7269 	mgmt_pending_free(cmd);
7270 }
7271 
get_conn_info_sync(struct hci_dev * hdev,void * data)7272 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7273 {
7274 	struct mgmt_pending_cmd *cmd = data;
7275 	struct mgmt_cp_get_conn_info *cp = cmd->param;
7276 	struct hci_conn *conn;
7277 	int err;
7278 	__le16   handle;
7279 
7280 	/* Make sure we are still connected */
7281 	if (cp->addr.type == BDADDR_BREDR)
7282 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7283 					       &cp->addr.bdaddr);
7284 	else
7285 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7286 
7287 	if (!conn || conn->state != BT_CONNECTED)
7288 		return MGMT_STATUS_NOT_CONNECTED;
7289 
7290 	cmd->user_data = conn;
7291 	handle = cpu_to_le16(conn->handle);
7292 
7293 	/* Refresh RSSI each time */
7294 	err = hci_read_rssi_sync(hdev, handle);
7295 
7296 	/* For LE links TX power does not change thus we don't need to
7297 	 * query for it once value is known.
7298 	 */
7299 	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7300 		     conn->tx_power == HCI_TX_POWER_INVALID))
7301 		err = hci_read_tx_power_sync(hdev, handle, 0x00);
7302 
7303 	/* Max TX power needs to be read only once per connection */
7304 	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7305 		err = hci_read_tx_power_sync(hdev, handle, 0x01);
7306 
7307 	return err;
7308 }
7309 
get_conn_info(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7310 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7311 			 u16 len)
7312 {
7313 	struct mgmt_cp_get_conn_info *cp = data;
7314 	struct mgmt_rp_get_conn_info rp;
7315 	struct hci_conn *conn;
7316 	unsigned long conn_info_age;
7317 	int err = 0;
7318 
7319 	bt_dev_dbg(hdev, "sock %p", sk);
7320 
7321 	memset(&rp, 0, sizeof(rp));
7322 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7323 	rp.addr.type = cp->addr.type;
7324 
7325 	if (!bdaddr_type_is_valid(cp->addr.type))
7326 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7327 					 MGMT_STATUS_INVALID_PARAMS,
7328 					 &rp, sizeof(rp));
7329 
7330 	hci_dev_lock(hdev);
7331 
7332 	if (!hdev_is_powered(hdev)) {
7333 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7334 					MGMT_STATUS_NOT_POWERED, &rp,
7335 					sizeof(rp));
7336 		goto unlock;
7337 	}
7338 
7339 	if (cp->addr.type == BDADDR_BREDR)
7340 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7341 					       &cp->addr.bdaddr);
7342 	else
7343 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7344 
7345 	if (!conn || conn->state != BT_CONNECTED) {
7346 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7347 					MGMT_STATUS_NOT_CONNECTED, &rp,
7348 					sizeof(rp));
7349 		goto unlock;
7350 	}
7351 
7352 	/* To avoid client trying to guess when to poll again for information we
7353 	 * calculate conn info age as random value between min/max set in hdev.
7354 	 */
7355 	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7356 						 hdev->conn_info_max_age - 1);
7357 
7358 	/* Query controller to refresh cached values if they are too old or were
7359 	 * never read.
7360 	 */
7361 	if (time_after(jiffies, conn->conn_info_timestamp +
7362 		       msecs_to_jiffies(conn_info_age)) ||
7363 	    !conn->conn_info_timestamp) {
7364 		struct mgmt_pending_cmd *cmd;
7365 
7366 		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7367 				       len);
7368 		if (!cmd) {
7369 			err = -ENOMEM;
7370 		} else {
7371 			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7372 						 cmd, get_conn_info_complete);
7373 		}
7374 
7375 		if (err < 0) {
7376 			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7377 					  MGMT_STATUS_FAILED, &rp, sizeof(rp));
7378 
7379 			if (cmd)
7380 				mgmt_pending_free(cmd);
7381 
7382 			goto unlock;
7383 		}
7384 
7385 		conn->conn_info_timestamp = jiffies;
7386 	} else {
7387 		/* Cache is valid, just reply with values cached in hci_conn */
7388 		rp.rssi = conn->rssi;
7389 		rp.tx_power = conn->tx_power;
7390 		rp.max_tx_power = conn->max_tx_power;
7391 
7392 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7393 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7394 	}
7395 
7396 unlock:
7397 	hci_dev_unlock(hdev);
7398 	return err;
7399 }
7400 
get_clock_info_complete(struct hci_dev * hdev,void * data,int err)7401 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7402 {
7403 	struct mgmt_pending_cmd *cmd = data;
7404 	struct mgmt_cp_get_clock_info *cp = cmd->param;
7405 	struct mgmt_rp_get_clock_info rp;
7406 	struct hci_conn *conn = cmd->user_data;
7407 	u8 status = mgmt_status(err);
7408 
7409 	bt_dev_dbg(hdev, "err %d", err);
7410 
7411 	memset(&rp, 0, sizeof(rp));
7412 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7413 	rp.addr.type = cp->addr.type;
7414 
7415 	if (err)
7416 		goto complete;
7417 
7418 	rp.local_clock = cpu_to_le32(hdev->clock);
7419 
7420 	if (conn) {
7421 		rp.piconet_clock = cpu_to_le32(conn->clock);
7422 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7423 	}
7424 
7425 complete:
7426 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7427 			  sizeof(rp));
7428 
7429 	mgmt_pending_free(cmd);
7430 }
7431 
get_clock_info_sync(struct hci_dev * hdev,void * data)7432 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7433 {
7434 	struct mgmt_pending_cmd *cmd = data;
7435 	struct mgmt_cp_get_clock_info *cp = cmd->param;
7436 	struct hci_cp_read_clock hci_cp;
7437 	struct hci_conn *conn;
7438 
7439 	memset(&hci_cp, 0, sizeof(hci_cp));
7440 	hci_read_clock_sync(hdev, &hci_cp);
7441 
7442 	/* Make sure connection still exists */
7443 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7444 	if (!conn || conn->state != BT_CONNECTED)
7445 		return MGMT_STATUS_NOT_CONNECTED;
7446 
7447 	cmd->user_data = conn;
7448 	hci_cp.handle = cpu_to_le16(conn->handle);
7449 	hci_cp.which = 0x01; /* Piconet clock */
7450 
7451 	return hci_read_clock_sync(hdev, &hci_cp);
7452 }
7453 
get_clock_info(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7454 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7455 								u16 len)
7456 {
7457 	struct mgmt_cp_get_clock_info *cp = data;
7458 	struct mgmt_rp_get_clock_info rp;
7459 	struct mgmt_pending_cmd *cmd;
7460 	struct hci_conn *conn;
7461 	int err;
7462 
7463 	bt_dev_dbg(hdev, "sock %p", sk);
7464 
7465 	memset(&rp, 0, sizeof(rp));
7466 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7467 	rp.addr.type = cp->addr.type;
7468 
7469 	if (cp->addr.type != BDADDR_BREDR)
7470 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7471 					 MGMT_STATUS_INVALID_PARAMS,
7472 					 &rp, sizeof(rp));
7473 
7474 	hci_dev_lock(hdev);
7475 
7476 	if (!hdev_is_powered(hdev)) {
7477 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7478 					MGMT_STATUS_NOT_POWERED, &rp,
7479 					sizeof(rp));
7480 		goto unlock;
7481 	}
7482 
7483 	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7484 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7485 					       &cp->addr.bdaddr);
7486 		if (!conn || conn->state != BT_CONNECTED) {
7487 			err = mgmt_cmd_complete(sk, hdev->id,
7488 						MGMT_OP_GET_CLOCK_INFO,
7489 						MGMT_STATUS_NOT_CONNECTED,
7490 						&rp, sizeof(rp));
7491 			goto unlock;
7492 		}
7493 	} else {
7494 		conn = NULL;
7495 	}
7496 
7497 	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7498 	if (!cmd)
7499 		err = -ENOMEM;
7500 	else
7501 		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7502 					 get_clock_info_complete);
7503 
7504 	if (err < 0) {
7505 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7506 					MGMT_STATUS_FAILED, &rp, sizeof(rp));
7507 
7508 		if (cmd)
7509 			mgmt_pending_free(cmd);
7510 	}
7511 
7512 
7513 unlock:
7514 	hci_dev_unlock(hdev);
7515 	return err;
7516 }
7517 
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)7518 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7519 {
7520 	struct hci_conn *conn;
7521 
7522 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7523 	if (!conn)
7524 		return false;
7525 
7526 	if (conn->dst_type != type)
7527 		return false;
7528 
7529 	if (conn->state != BT_CONNECTED)
7530 		return false;
7531 
7532 	return true;
7533 }
7534 
7535 /* This function requires the caller holds hdev->lock */
hci_conn_params_set(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type,u8 auto_connect)7536 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7537 			       u8 addr_type, u8 auto_connect)
7538 {
7539 	struct hci_conn_params *params;
7540 
7541 	params = hci_conn_params_add(hdev, addr, addr_type);
7542 	if (!params)
7543 		return -EIO;
7544 
7545 	if (params->auto_connect == auto_connect)
7546 		return 0;
7547 
7548 	hci_pend_le_list_del_init(params);
7549 
7550 	switch (auto_connect) {
7551 	case HCI_AUTO_CONN_DISABLED:
7552 	case HCI_AUTO_CONN_LINK_LOSS:
7553 		/* If auto connect is being disabled when we're trying to
7554 		 * connect to device, keep connecting.
7555 		 */
7556 		if (params->explicit_connect)
7557 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7558 		break;
7559 	case HCI_AUTO_CONN_REPORT:
7560 		if (params->explicit_connect)
7561 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7562 		else
7563 			hci_pend_le_list_add(params, &hdev->pend_le_reports);
7564 		break;
7565 	case HCI_AUTO_CONN_DIRECT:
7566 	case HCI_AUTO_CONN_ALWAYS:
7567 		if (!is_connected(hdev, addr, addr_type))
7568 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7569 		break;
7570 	}
7571 
7572 	params->auto_connect = auto_connect;
7573 
7574 	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7575 		   addr, addr_type, auto_connect);
7576 
7577 	return 0;
7578 }
7579 
device_added(struct sock * sk,struct hci_dev * hdev,bdaddr_t * bdaddr,u8 type,u8 action)7580 static void device_added(struct sock *sk, struct hci_dev *hdev,
7581 			 bdaddr_t *bdaddr, u8 type, u8 action)
7582 {
7583 	struct mgmt_ev_device_added ev;
7584 
7585 	bacpy(&ev.addr.bdaddr, bdaddr);
7586 	ev.addr.type = type;
7587 	ev.action = action;
7588 
7589 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7590 }
7591 
add_device_complete(struct hci_dev * hdev,void * data,int err)7592 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7593 {
7594 	struct mgmt_pending_cmd *cmd = data;
7595 	struct mgmt_cp_add_device *cp = cmd->param;
7596 
7597 	if (!err) {
7598 		device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7599 			     cp->action);
7600 		device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7601 				     cp->addr.type, hdev->conn_flags,
7602 				     PTR_UINT(cmd->user_data));
7603 	}
7604 
7605 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7606 			  mgmt_status(err), &cp->addr, sizeof(cp->addr));
7607 	mgmt_pending_free(cmd);
7608 }
7609 
add_device_sync(struct hci_dev * hdev,void * data)7610 static int add_device_sync(struct hci_dev *hdev, void *data)
7611 {
7612 	return hci_update_passive_scan_sync(hdev);
7613 }
7614 
add_device(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7615 static int add_device(struct sock *sk, struct hci_dev *hdev,
7616 		      void *data, u16 len)
7617 {
7618 	struct mgmt_pending_cmd *cmd;
7619 	struct mgmt_cp_add_device *cp = data;
7620 	u8 auto_conn, addr_type;
7621 	struct hci_conn_params *params;
7622 	int err;
7623 	u32 current_flags = 0;
7624 	u32 supported_flags;
7625 
7626 	bt_dev_dbg(hdev, "sock %p", sk);
7627 
7628 	if (!bdaddr_type_is_valid(cp->addr.type) ||
7629 	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7630 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7631 					 MGMT_STATUS_INVALID_PARAMS,
7632 					 &cp->addr, sizeof(cp->addr));
7633 
7634 	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7635 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7636 					 MGMT_STATUS_INVALID_PARAMS,
7637 					 &cp->addr, sizeof(cp->addr));
7638 
7639 	hci_dev_lock(hdev);
7640 
7641 	if (cp->addr.type == BDADDR_BREDR) {
7642 		/* Only incoming connections action is supported for now */
7643 		if (cp->action != 0x01) {
7644 			err = mgmt_cmd_complete(sk, hdev->id,
7645 						MGMT_OP_ADD_DEVICE,
7646 						MGMT_STATUS_INVALID_PARAMS,
7647 						&cp->addr, sizeof(cp->addr));
7648 			goto unlock;
7649 		}
7650 
7651 		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7652 						     &cp->addr.bdaddr,
7653 						     cp->addr.type, 0);
7654 		if (err)
7655 			goto unlock;
7656 
7657 		hci_update_scan(hdev);
7658 
7659 		goto added;
7660 	}
7661 
7662 	addr_type = le_addr_type(cp->addr.type);
7663 
7664 	if (cp->action == 0x02)
7665 		auto_conn = HCI_AUTO_CONN_ALWAYS;
7666 	else if (cp->action == 0x01)
7667 		auto_conn = HCI_AUTO_CONN_DIRECT;
7668 	else
7669 		auto_conn = HCI_AUTO_CONN_REPORT;
7670 
7671 	/* Kernel internally uses conn_params with resolvable private
7672 	 * address, but Add Device allows only identity addresses.
7673 	 * Make sure it is enforced before calling
7674 	 * hci_conn_params_lookup.
7675 	 */
7676 	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7677 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7678 					MGMT_STATUS_INVALID_PARAMS,
7679 					&cp->addr, sizeof(cp->addr));
7680 		goto unlock;
7681 	}
7682 
7683 	/* If the connection parameters don't exist for this device,
7684 	 * they will be created and configured with defaults.
7685 	 */
7686 	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7687 				auto_conn) < 0) {
7688 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7689 					MGMT_STATUS_FAILED, &cp->addr,
7690 					sizeof(cp->addr));
7691 		goto unlock;
7692 	} else {
7693 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7694 						addr_type);
7695 		if (params)
7696 			current_flags = params->flags;
7697 	}
7698 
7699 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7700 	if (!cmd) {
7701 		err = -ENOMEM;
7702 		goto unlock;
7703 	}
7704 
7705 	cmd->user_data = UINT_PTR(current_flags);
7706 
7707 	err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
7708 				 add_device_complete);
7709 	if (err < 0) {
7710 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7711 					MGMT_STATUS_FAILED, &cp->addr,
7712 					sizeof(cp->addr));
7713 		mgmt_pending_free(cmd);
7714 	}
7715 
7716 	goto unlock;
7717 
7718 added:
7719 	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7720 	supported_flags = hdev->conn_flags;
7721 	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7722 			     supported_flags, current_flags);
7723 
7724 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7725 				MGMT_STATUS_SUCCESS, &cp->addr,
7726 				sizeof(cp->addr));
7727 
7728 unlock:
7729 	hci_dev_unlock(hdev);
7730 	return err;
7731 }
7732 
device_removed(struct sock * sk,struct hci_dev * hdev,bdaddr_t * bdaddr,u8 type)7733 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7734 			   bdaddr_t *bdaddr, u8 type)
7735 {
7736 	struct mgmt_ev_device_removed ev;
7737 
7738 	bacpy(&ev.addr.bdaddr, bdaddr);
7739 	ev.addr.type = type;
7740 
7741 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7742 }
7743 
remove_device_sync(struct hci_dev * hdev,void * data)7744 static int remove_device_sync(struct hci_dev *hdev, void *data)
7745 {
7746 	return hci_update_passive_scan_sync(hdev);
7747 }
7748 
remove_device(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7749 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7750 			 void *data, u16 len)
7751 {
7752 	struct mgmt_cp_remove_device *cp = data;
7753 	int err;
7754 
7755 	bt_dev_dbg(hdev, "sock %p", sk);
7756 
7757 	hci_dev_lock(hdev);
7758 
7759 	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7760 		struct hci_conn_params *params;
7761 		u8 addr_type;
7762 
7763 		if (!bdaddr_type_is_valid(cp->addr.type)) {
7764 			err = mgmt_cmd_complete(sk, hdev->id,
7765 						MGMT_OP_REMOVE_DEVICE,
7766 						MGMT_STATUS_INVALID_PARAMS,
7767 						&cp->addr, sizeof(cp->addr));
7768 			goto unlock;
7769 		}
7770 
7771 		if (cp->addr.type == BDADDR_BREDR) {
7772 			err = hci_bdaddr_list_del(&hdev->accept_list,
7773 						  &cp->addr.bdaddr,
7774 						  cp->addr.type);
7775 			if (err) {
7776 				err = mgmt_cmd_complete(sk, hdev->id,
7777 							MGMT_OP_REMOVE_DEVICE,
7778 							MGMT_STATUS_INVALID_PARAMS,
7779 							&cp->addr,
7780 							sizeof(cp->addr));
7781 				goto unlock;
7782 			}
7783 
7784 			hci_update_scan(hdev);
7785 
7786 			device_removed(sk, hdev, &cp->addr.bdaddr,
7787 				       cp->addr.type);
7788 			goto complete;
7789 		}
7790 
7791 		addr_type = le_addr_type(cp->addr.type);
7792 
7793 		/* Kernel internally uses conn_params with resolvable private
7794 		 * address, but Remove Device allows only identity addresses.
7795 		 * Make sure it is enforced before calling
7796 		 * hci_conn_params_lookup.
7797 		 */
7798 		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7799 			err = mgmt_cmd_complete(sk, hdev->id,
7800 						MGMT_OP_REMOVE_DEVICE,
7801 						MGMT_STATUS_INVALID_PARAMS,
7802 						&cp->addr, sizeof(cp->addr));
7803 			goto unlock;
7804 		}
7805 
7806 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7807 						addr_type);
7808 		if (!params) {
7809 			err = mgmt_cmd_complete(sk, hdev->id,
7810 						MGMT_OP_REMOVE_DEVICE,
7811 						MGMT_STATUS_INVALID_PARAMS,
7812 						&cp->addr, sizeof(cp->addr));
7813 			goto unlock;
7814 		}
7815 
7816 		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7817 		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7818 			err = mgmt_cmd_complete(sk, hdev->id,
7819 						MGMT_OP_REMOVE_DEVICE,
7820 						MGMT_STATUS_INVALID_PARAMS,
7821 						&cp->addr, sizeof(cp->addr));
7822 			goto unlock;
7823 		}
7824 
7825 		hci_conn_params_free(params);
7826 
7827 		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7828 	} else {
7829 		struct hci_conn_params *p, *tmp;
7830 		struct bdaddr_list *b, *btmp;
7831 
7832 		if (cp->addr.type) {
7833 			err = mgmt_cmd_complete(sk, hdev->id,
7834 						MGMT_OP_REMOVE_DEVICE,
7835 						MGMT_STATUS_INVALID_PARAMS,
7836 						&cp->addr, sizeof(cp->addr));
7837 			goto unlock;
7838 		}
7839 
7840 		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7841 			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7842 			list_del(&b->list);
7843 			kfree(b);
7844 		}
7845 
7846 		hci_update_scan(hdev);
7847 
7848 		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7849 			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7850 				continue;
7851 			device_removed(sk, hdev, &p->addr, p->addr_type);
7852 			if (p->explicit_connect) {
7853 				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7854 				continue;
7855 			}
7856 			hci_conn_params_free(p);
7857 		}
7858 
7859 		bt_dev_dbg(hdev, "All LE connection parameters were removed");
7860 	}
7861 
7862 	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7863 
7864 complete:
7865 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7866 				MGMT_STATUS_SUCCESS, &cp->addr,
7867 				sizeof(cp->addr));
7868 unlock:
7869 	hci_dev_unlock(hdev);
7870 	return err;
7871 }
7872 
load_conn_param(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7873 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7874 			   u16 len)
7875 {
7876 	struct mgmt_cp_load_conn_param *cp = data;
7877 	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7878 				     sizeof(struct mgmt_conn_param));
7879 	u16 param_count, expected_len;
7880 	int i;
7881 
7882 	if (!lmp_le_capable(hdev))
7883 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7884 				       MGMT_STATUS_NOT_SUPPORTED);
7885 
7886 	param_count = __le16_to_cpu(cp->param_count);
7887 	if (param_count > max_param_count) {
7888 		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7889 			   param_count);
7890 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7891 				       MGMT_STATUS_INVALID_PARAMS);
7892 	}
7893 
7894 	expected_len = struct_size(cp, params, param_count);
7895 	if (expected_len != len) {
7896 		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7897 			   expected_len, len);
7898 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7899 				       MGMT_STATUS_INVALID_PARAMS);
7900 	}
7901 
7902 	bt_dev_dbg(hdev, "param_count %u", param_count);
7903 
7904 	hci_dev_lock(hdev);
7905 
7906 	hci_conn_params_clear_disabled(hdev);
7907 
7908 	for (i = 0; i < param_count; i++) {
7909 		struct mgmt_conn_param *param = &cp->params[i];
7910 		struct hci_conn_params *hci_param;
7911 		u16 min, max, latency, timeout;
7912 		u8 addr_type;
7913 
7914 		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7915 			   param->addr.type);
7916 
7917 		if (param->addr.type == BDADDR_LE_PUBLIC) {
7918 			addr_type = ADDR_LE_DEV_PUBLIC;
7919 		} else if (param->addr.type == BDADDR_LE_RANDOM) {
7920 			addr_type = ADDR_LE_DEV_RANDOM;
7921 		} else {
7922 			bt_dev_err(hdev, "ignoring invalid connection parameters");
7923 			continue;
7924 		}
7925 
7926 		min = le16_to_cpu(param->min_interval);
7927 		max = le16_to_cpu(param->max_interval);
7928 		latency = le16_to_cpu(param->latency);
7929 		timeout = le16_to_cpu(param->timeout);
7930 
7931 		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7932 			   min, max, latency, timeout);
7933 
7934 		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7935 			bt_dev_err(hdev, "ignoring invalid connection parameters");
7936 			continue;
7937 		}
7938 
7939 		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7940 						addr_type);
7941 		if (!hci_param) {
7942 			bt_dev_err(hdev, "failed to add connection parameters");
7943 			continue;
7944 		}
7945 
7946 		hci_param->conn_min_interval = min;
7947 		hci_param->conn_max_interval = max;
7948 		hci_param->conn_latency = latency;
7949 		hci_param->supervision_timeout = timeout;
7950 	}
7951 
7952 	hci_dev_unlock(hdev);
7953 
7954 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7955 				 NULL, 0);
7956 }
7957 
set_external_config(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7958 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7959 			       void *data, u16 len)
7960 {
7961 	struct mgmt_cp_set_external_config *cp = data;
7962 	bool changed;
7963 	int err;
7964 
7965 	bt_dev_dbg(hdev, "sock %p", sk);
7966 
7967 	if (hdev_is_powered(hdev))
7968 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7969 				       MGMT_STATUS_REJECTED);
7970 
7971 	if (cp->config != 0x00 && cp->config != 0x01)
7972 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7973 				         MGMT_STATUS_INVALID_PARAMS);
7974 
7975 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7976 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7977 				       MGMT_STATUS_NOT_SUPPORTED);
7978 
7979 	hci_dev_lock(hdev);
7980 
7981 	if (cp->config)
7982 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7983 	else
7984 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7985 
7986 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7987 	if (err < 0)
7988 		goto unlock;
7989 
7990 	if (!changed)
7991 		goto unlock;
7992 
7993 	err = new_options(hdev, sk);
7994 
7995 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7996 		mgmt_index_removed(hdev);
7997 
7998 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7999 			hci_dev_set_flag(hdev, HCI_CONFIG);
8000 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8001 
8002 			queue_work(hdev->req_workqueue, &hdev->power_on);
8003 		} else {
8004 			set_bit(HCI_RAW, &hdev->flags);
8005 			mgmt_index_added(hdev);
8006 		}
8007 	}
8008 
8009 unlock:
8010 	hci_dev_unlock(hdev);
8011 	return err;
8012 }
8013 
/* Handle MGMT_OP_SET_PUBLIC_ADDRESS.
 *
 * Stores a new public address in hdev->public_addr for controllers that
 * provide a driver set_bdaddr callback. If this completes the device's
 * configuration, the controller is re-registered as configured and
 * scheduled for power-on.
 *
 * Returns 0 or a negative errno from the mgmt reply helpers.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Changing the address is only allowed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* The all-zeroes address (BDADDR_ANY) is not a valid public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Without a driver callback to program the address this command
	 * cannot take effect.
	 */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	/* Nothing further to do if the stored address did not change */
	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If the address was the last missing configuration item, move the
	 * controller to the configured list and schedule a power-on.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8065 
/* Completion handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA (BR/EDR path).
 *
 * Parses the controller's Read Local OOB (Extended) Data response held in
 * cmd->skb, packages the hash/randomizer values as EIR-encoded fields in
 * the mgmt reply, sends the command complete and, on success, emits a
 * Local OOB Data Updated event to other sockets that opted in.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if the request was cancelled or this is no longer the
	 * pending command for this opcode.
	 */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* Derive an overall status from the skb when the sync request
	 * itself succeeded: missing skb, error pointer, or the HCI status
	 * byte in the first response byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy (non-Secure-Connections) response: P-192 only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* 5 bytes class-of-device + two 18-byte EIR fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended response: P-256, plus P-192 unless SC-only mode */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* The requester implicitly opts in to future OOB data events */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8189 
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)8190 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8191 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8192 {
8193 	struct mgmt_pending_cmd *cmd;
8194 	int err;
8195 
8196 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8197 			       cp, sizeof(*cp));
8198 	if (!cmd)
8199 		return -ENOMEM;
8200 
8201 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8202 				 read_local_oob_ext_data_complete);
8203 
8204 	if (err < 0) {
8205 		mgmt_pending_remove(cmd);
8206 		return err;
8207 	}
8208 
8209 	return 0;
8210 }
8211 
/* Handle MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * Produces EIR-encoded out-of-band pairing data for either the BR/EDR
 * transport (delegated to the controller via read_local_ssp_oob_req) or
 * the LE transport (assembled locally: address, role, optional SC
 * confirm/random values and flags).
 *
 * Returns 0 or a negative errno from the mgmt reply helpers.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: compute a worst-case eir_len for the reply buffer
	 * and an early status based on transport support.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: fill in the actual EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* OOB values must come from the controller; the
			 * reply is sent asynchronously on success.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick static (addr[6] = 0x01) or public (addr[6] = 0x00)
		 * address depending on configuration.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role: 0x02 peripheral-preferred when advertising,
		 * 0x01 central-preferred otherwise.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* The requester implicitly opts in to future OOB data events */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8372 
get_supported_adv_flags(struct hci_dev * hdev)8373 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8374 {
8375 	u32 flags = 0;
8376 
8377 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8378 	flags |= MGMT_ADV_FLAG_DISCOV;
8379 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8380 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8381 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8382 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8383 	flags |= MGMT_ADV_PARAM_DURATION;
8384 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8385 	flags |= MGMT_ADV_PARAM_INTERVALS;
8386 	flags |= MGMT_ADV_PARAM_TX_POWER;
8387 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8388 
8389 	/* In extended adv TX_POWER returned from Set Adv Param
8390 	 * will be always valid.
8391 	 */
8392 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8393 		flags |= MGMT_ADV_FLAG_TX_POWER;
8394 
8395 	if (ext_adv_capable(hdev)) {
8396 		flags |= MGMT_ADV_FLAG_SEC_1M;
8397 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8398 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8399 
8400 		if (le_2m_capable(hdev))
8401 			flags |= MGMT_ADV_FLAG_SEC_2M;
8402 
8403 		if (le_coded_capable(hdev))
8404 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8405 	}
8406 
8407 	return flags;
8408 }
8409 
/* Handle MGMT_OP_READ_ADV_FEATURES.
 *
 * Reports the supported advertising flags, data-length limits and the
 * list of currently registered advertising instance identifiers.
 *
 * Returns 0, -ENOMEM, or a negative errno from mgmt_cmd_complete().
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per registered instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink the reply accordingly */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8464 
/* Number of bytes the local-name EIR field would occupy for this device,
 * computed by rendering it into a scratch buffer.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}
8471 
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)8472 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8473 			   bool is_adv_data)
8474 {
8475 	u8 max_len = max_adv_len(hdev);
8476 
8477 	if (is_adv_data) {
8478 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8479 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8480 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8481 			max_len -= 3;
8482 
8483 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8484 			max_len -= 3;
8485 	} else {
8486 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8487 			max_len -= calculate_name_len(hdev);
8488 
8489 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8490 			max_len -= 4;
8491 	}
8492 
8493 	return max_len;
8494 }
8495 
flags_managed(u32 adv_flags)8496 static bool flags_managed(u32 adv_flags)
8497 {
8498 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8499 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8500 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8501 }
8502 
tx_power_managed(u32 adv_flags)8503 static bool tx_power_managed(u32 adv_flags)
8504 {
8505 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8506 }
8507 
name_managed(u32 adv_flags)8508 static bool name_managed(u32 adv_flags)
8509 {
8510 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8511 }
8512 
appearance_managed(u32 adv_flags)8513 static bool appearance_managed(u32 adv_flags)
8514 {
8515 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8516 }
8517 
/* Validate user-supplied advertising or scan-response TLV data.
 *
 * Checks the overall length against tlv_data_max_len(), that each field
 * fits within the buffer, and that no field duplicates one the kernel
 * manages itself (Flags, TX power, name, appearance) per adv_flags.
 *
 * Returns true if the data may be accepted.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		/* If the current field length would exceed the total data
		 * length, then it's invalid. Checking this before reading
		 * data[i + 1] also guarantees the type byte is in bounds.
		 */
		if (i + cur_len >= len)
			return false;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;
	}

	return true;
}
8562 
requested_adv_flags_are_valid(struct hci_dev * hdev,u32 adv_flags)8563 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8564 {
8565 	u32 supported_flags, phy_flags;
8566 
8567 	/* The current implementation only supports a subset of the specified
8568 	 * flags. Also need to check mutual exclusiveness of sec flags.
8569 	 */
8570 	supported_flags = get_supported_adv_flags(hdev);
8571 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8572 	if (adv_flags & ~supported_flags ||
8573 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8574 		return false;
8575 
8576 	return true;
8577 }
8578 
adv_busy(struct hci_dev * hdev)8579 static bool adv_busy(struct hci_dev *hdev)
8580 {
8581 	return pending_find(MGMT_OP_SET_LE, hdev);
8582 }
8583 
/* Finalize pending advertising instances after a (re)configuration
 * attempt. On success all pending instances become permanent; on error
 * every still-pending instance is removed and its removal is signalled
 * to userspace.
 *
 * Note: the instance parameter is kept for interface compatibility; the
 * cleanup below walks every pending instance regardless.
 */
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	/* _safe iteration because instances may be removed while walking */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 rem_inst;	/* renamed: previously shadowed 'instance' */

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		rem_inst = adv->instance;

		/* Stop the rotation timer if it targets this instance */
		if (hdev->cur_adv_instance == rem_inst)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, rem_inst);
		mgmt_advertising_removed(sk, hdev, rem_inst);
	}

	hci_dev_unlock(hdev);
}
8615 
/* Completion handler for MGMT_OP_ADD_ADVERTISING: reply to the caller
 * with the instance number, then finalize or roll back pending instances
 * via add_adv_complete().
 */
static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	mgmt_pending_free(cmd);
}
8637 
/* hci_cmd_sync work: schedule (start) the advertising instance recorded
 * in the pending command's parameters. 'true' forces the instance to be
 * (re)scheduled.
 */
static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}
8645 
/* Handle MGMT_OP_ADD_ADVERTISING.
 *
 * Validates the request (instance range, payload length, flags, TLV
 * data), registers or updates the advertising instance and, when
 * needed, queues the HCI work to start advertising it. If no HCI
 * interaction is required the command completes immediately.
 *
 * Returns 0 or a negative errno from the mgmt reply helpers.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Valid instance numbers are 1..le_num_of_adv_sets */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The payload must exactly cover the fixed header plus both
	 * variable-length data sections.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout requires a powered controller to run it down */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Record which instance the sync work should actually schedule */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8780 
/* Completion handler for MGMT_OP_ADD_EXT_ADV_PARAMS.
 *
 * On success replies with the selected TX power and the data-space
 * limits implied by the requested flags. On error the instance is torn
 * down again (with a removal event if it had been advertising before)
 * and an error status is returned to the caller.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Instance may already be gone; nothing to report then */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8831 
/* hci_cmd_sync work: program the extended advertising parameters for the
 * instance recorded in the pending command.
 */
static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}
8839 
/* Handle MGMT_OP_ADD_EXT_ADV_PARAMS.
 *
 * First half of the two-step extended advertising setup: validates the
 * requested parameters, creates an advertising instance without data,
 * and (for controllers with extended advertising) queues the HCI work to
 * program the parameters. Data is supplied later via Add Ext Adv Data.
 *
 * Returns 0 or a negative errno from the mgmt reply helpers.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Valid instance numbers are 1..le_num_of_adv_sets */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Undo the instance created above */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: no HCI work needed yet, reply with
		 * defaults and the space limits for the requested flags.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8955 
/* Completion handler for MGMT_OP_ADD_EXT_ADV_DATA: finalize or roll back
 * the pending instance, then reply to the caller with the instance
 * number (or an error status).
 */
static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}
8977 
/* hci_cmd_sync work for MGMT_OP_ADD_EXT_ADV_DATA.
 *
 * Extended advertising: push advertising data, then scan response data,
 * then enable the instance; stop at the first failure. Legacy
 * advertising: fall back to the software instance scheduler.
 */
static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	int err;

	if (ext_adv_capable(hdev)) {
		err = hci_update_adv_data_sync(hdev, cp->instance);
		if (err)
			return err;

		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
		if (err)
			return err;

		return hci_enable_ext_advertising_sync(hdev, cp->instance);
	}

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}
8998 
/* MGMT_OP_ADD_EXT_ADV_DATA handler: attach advertising and scan response
 * data to an instance previously created with Add Extended Advertising
 * Parameters, then schedule it. On any failure after the instance lookup
 * the (still pending) instance is removed again.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must already exist (created by Add Ext Adv Params) */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data: adv data and scan response are packed
	 * back-to-back in cp->data.
	 */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9117 
remove_advertising_complete(struct hci_dev * hdev,void * data,int err)9118 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9119 					int err)
9120 {
9121 	struct mgmt_pending_cmd *cmd = data;
9122 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9123 	struct mgmt_rp_remove_advertising rp;
9124 
9125 	bt_dev_dbg(hdev, "err %d", err);
9126 
9127 	memset(&rp, 0, sizeof(rp));
9128 	rp.instance = cp->instance;
9129 
9130 	if (err)
9131 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9132 				mgmt_status(err));
9133 	else
9134 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9135 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9136 
9137 	mgmt_pending_free(cmd);
9138 }
9139 
remove_advertising_sync(struct hci_dev * hdev,void * data)9140 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9141 {
9142 	struct mgmt_pending_cmd *cmd = data;
9143 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9144 	int err;
9145 
9146 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9147 	if (err)
9148 		return err;
9149 
9150 	if (list_empty(&hdev->adv_instances))
9151 		err = hci_disable_advertising_sync(hdev);
9152 
9153 	return err;
9154 }
9155 
/* MGMT_OP_REMOVE_ADVERTISING handler: remove one advertising instance
 * (cp->instance != 0) or all of them (cp->instance == 0). The actual HCI
 * work is queued via hci_cmd_sync and completed asynchronously in
 * remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* A non-zero instance must refer to an existing instance */
	hci_dev_lock(hdev);

	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while the LE setting itself is being changed */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered, nothing to remove */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9203 
/* MGMT_OP_GET_ADV_SIZE_INFO handler: report how many bytes of adv data
 * and scan response data fit for a given instance/flags combination.
 * Purely informational; no controller communication happens here.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	/* Instance numbers are 1-based and bounded by the controller */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* Only a subset of the defined flags is implemented; reject the
	 * rest.
	 */
	if (flags & ~get_supported_adv_flags(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
9239 
/* Dispatch table for MGMT opcodes, indexed by opcode value. Each entry
 * gives the handler, its fixed parameter size (a minimum size when
 * HCI_MGMT_VAR_LEN is set), and flags describing whether the command
 * needs a controller index and/or elevated privileges.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9373 
mgmt_index_added(struct hci_dev * hdev)9374 void mgmt_index_added(struct hci_dev *hdev)
9375 {
9376 	struct mgmt_ev_ext_index ev;
9377 
9378 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9379 		return;
9380 
9381 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9382 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9383 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9384 		ev.type = 0x01;
9385 	} else {
9386 		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9387 				 HCI_MGMT_INDEX_EVENTS);
9388 		ev.type = 0x00;
9389 	}
9390 
9391 	ev.bus = hdev->bus;
9392 
9393 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9394 			 HCI_MGMT_EXT_INDEX_EVENTS);
9395 }
9396 
mgmt_index_removed(struct hci_dev * hdev)9397 void mgmt_index_removed(struct hci_dev *hdev)
9398 {
9399 	struct mgmt_ev_ext_index ev;
9400 	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
9401 
9402 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9403 		return;
9404 
9405 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);
9406 
9407 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9408 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9409 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9410 		ev.type = 0x01;
9411 	} else {
9412 		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9413 				 HCI_MGMT_INDEX_EVENTS);
9414 		ev.type = 0x00;
9415 	}
9416 
9417 	ev.bus = hdev->bus;
9418 
9419 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9420 			 HCI_MGMT_EXT_INDEX_EVENTS);
9421 
9422 	/* Cancel any remaining timed work */
9423 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
9424 		return;
9425 	cancel_delayed_work_sync(&hdev->discov_off);
9426 	cancel_delayed_work_sync(&hdev->service_cache);
9427 	cancel_delayed_work_sync(&hdev->rpa_expired);
9428 }
9429 
mgmt_power_on(struct hci_dev * hdev,int err)9430 void mgmt_power_on(struct hci_dev *hdev, int err)
9431 {
9432 	struct cmd_lookup match = { NULL, hdev };
9433 
9434 	bt_dev_dbg(hdev, "err %d", err);
9435 
9436 	hci_dev_lock(hdev);
9437 
9438 	if (!err) {
9439 		restart_le_actions(hdev);
9440 		hci_update_passive_scan(hdev);
9441 	}
9442 
9443 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9444 
9445 	new_settings(hdev, match.sk);
9446 
9447 	if (match.sk)
9448 		sock_put(match.sk);
9449 
9450 	hci_dev_unlock(hdev);
9451 }
9452 
/* Handle a controller going down: answer Set Powered commands, fail all
 * other pending commands with an appropriate status, reset the class of
 * device and broadcast the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	/* Set Powered commands are answered with the (now off) settings */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches every remaining pending command */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	/* A non-zero class of device reads as all-zeroes while powered off */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9486 
mgmt_set_powered_failed(struct hci_dev * hdev,int err)9487 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9488 {
9489 	struct mgmt_pending_cmd *cmd;
9490 	u8 status;
9491 
9492 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9493 	if (!cmd)
9494 		return;
9495 
9496 	if (err == -ERFKILL)
9497 		status = MGMT_STATUS_RFKILLED;
9498 	else
9499 		status = MGMT_STATUS_FAILED;
9500 
9501 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9502 
9503 	mgmt_pending_remove(cmd);
9504 }
9505 
/* Emit a New Link Key event for a BR/EDR link key; store_hint tells
 * userspace whether the key should be persisted.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	/* Zero everything (including padding) before copying to userspace */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	ev.key.addr.type = BDADDR_BREDR;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.type = key->type;
	ev.key.pin_len = key->pin_len;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
9522 
mgmt_ltk_type(struct smp_ltk * ltk)9523 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9524 {
9525 	switch (ltk->type) {
9526 	case SMP_LTK:
9527 	case SMP_LTK_RESPONDER:
9528 		if (ltk->authenticated)
9529 			return MGMT_LTK_AUTHENTICATED;
9530 		return MGMT_LTK_UNAUTHENTICATED;
9531 	case SMP_LTK_P256:
9532 		if (ltk->authenticated)
9533 			return MGMT_LTK_P256_AUTH;
9534 		return MGMT_LTK_P256_UNAUTH;
9535 	case SMP_LTK_P256_DEBUG:
9536 		return MGMT_LTK_P256_DEBUG;
9537 	}
9538 
9539 	return MGMT_LTK_UNAUTHENTICATED;
9540 }
9541 
/* Emit a New Long Term Key event so userspace can persist the key. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	/* Zero everything (including padding) before copying to userspace */
	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * (top two address bits 0b11) and public addresses here.
	 */
	ev.store_hint = persistent;
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Only enc_size bytes of the key value are significant; keep the
	 * tail zeroed.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9584 
/* Emit a New IRK event carrying the peer's identity resolving key and
 * the resolvable private address it was seen with.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	/* Zero everything (including padding) before copying to userspace */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.rpa, &irk->rpa);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
9600 
/* Emit a New CSRK event so userspace can persist the signature
 * resolving key.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	/* Zero everything (including padding) before copying to userspace */
	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random (top two address bits 0b11) and public
	 * addresses here.
	 */
	ev.store_hint = persistent;
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9630 
/* Emit a New Connection Parameter event for an identity address; other
 * address types are skipped since the parameters cannot be stored.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	/* Zero everything (including padding) before copying to userspace */
	memset(&ev, 0, sizeof(ev));

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;

	/* Event fields are little-endian on the wire */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9651 
/* Emit a Device Connected event, carrying either the LE advertising data
 * seen from the peer or (for BR/EDR) the remote name and class of device
 * as EIR-encoded fields. Emitted at most once per connection, guarded by
 * the HCI_CONN_MGMT_CONNECTED bit.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	/* Only announce the connection once */
	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* allocate buff for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	/* mgmt_alloc_skb() may fail; skb_put() on a NULL skb would oops */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}
9701 
unpair_device_rsp(struct mgmt_pending_cmd * cmd,void * data)9702 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9703 {
9704 	struct hci_dev *hdev = data;
9705 	struct mgmt_cp_unpair_device *cp = cmd->param;
9706 
9707 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9708 
9709 	cmd->cmd_complete(cmd, 0);
9710 	mgmt_pending_remove(cmd);
9711 }
9712 
mgmt_powering_down(struct hci_dev * hdev)9713 bool mgmt_powering_down(struct hci_dev *hdev)
9714 {
9715 	struct mgmt_pending_cmd *cmd;
9716 	struct mgmt_mode *cp;
9717 
9718 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9719 	if (!cmd)
9720 		return false;
9721 
9722 	cp = cmd->param;
9723 	if (!cp->val)
9724 		return true;
9725 
9726 	return false;
9727 }
9728 
/* Emit a Device Disconnected event for an ACL or LE link that userspace
 * had previously been told about.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* Nothing to report if the connect was never announced */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	/* Disconnects while suspended are attributed to the local host */
	ev.reason = hdev->suspended ? MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND :
				      reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}
9755 
/* A disconnect request failed: answer pending Unpair Device commands and
 * complete a matching pending Disconnect command with the HCI status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	/* Only complete the command if it targets this exact address */
	cp = cmd->param;
	if (bacmp(bdaddr, &cp->addr.bdaddr) || cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9781 
/* Report a failed connection attempt. If userspace already saw the
 * device as connected, turn the failure into a disconnect event instead.
 */
void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9798 
/* Ask userspace for a PIN code for a BR/EDR pairing; secure indicates a
 * 16-digit PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	ev.addr.type = BDADDR_BREDR;
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
9809 
/* Complete a pending PIN Code Reply command with the given HCI status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}
}
9822 
/* Complete a pending PIN Code Negative Reply command with the given HCI
 * status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}
}
9835 
/* Ask userspace to confirm a numeric-comparison pairing value. */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	/* Wire format is little-endian */
	ev.value = cpu_to_le32(value);
	ev.confirm_hint = confirm_hint;

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9852 
/* Ask userspace to enter a passkey for an ongoing pairing. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9866 
/* Common completion for the four user confirm/passkey (negative) reply
 * commands: finish the pending command of the given opcode with the
 * translated HCI status. Returns -ENOENT if nothing was pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
9882 
/* Complete a pending User Confirm Reply command with the HCI status. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9889 
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9897 
/* Complete a pending User Passkey Reply command with the HCI status. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9904 
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9912 
/* Broadcast a Passkey Notify event carrying the passkey to display
 * and the keypress "entered" state.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.entered = entered;
	ev.passkey = __cpu_to_le32(passkey);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
9928 
/* Report an authentication failure on @conn to userspace.
 *
 * The Auth Failed event is broadcast to all mgmt sockets except the
 * one whose pairing command is pending for this connection (if any);
 * that socket gets its pending command completed with the failure
 * status instead, so it is not notified twice.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the initiator's socket when broadcasting the event */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9949 
/* Handle the result of toggling authentication on the controller:
 * respond to all pending Set Link Security commands and emit New
 * Settings if the HCI_LINK_SECURITY flag actually changed state.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* On failure just fail every pending command */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Sync the mgmt flag with the controller's actual auth state */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp stored a held reference to the first socket seen */
	if (match.sk)
		sock_put(match.sk);
}
9976 
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)9977 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9978 {
9979 	struct cmd_lookup *match = data;
9980 
9981 	if (match->sk == NULL) {
9982 		match->sk = cmd->sk;
9983 		sock_hold(match->sk);
9984 	}
9985 }
9986 
/* Handle completion of a Class of Device update. Any of Set Dev
 * Class, Add UUID or Remove UUID may have triggered the HCI command,
 * so all three pending lists are scanned for the originating socket,
 * which is then skipped when broadcasting the change.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		/* Class of Device is always 3 bytes on the wire */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	/* Drop the reference taken by sk_lookup(), if any */
	if (match.sk)
		sock_put(match.sk);
}
10005 
/* Handle completion of a local name change on the controller.
 *
 * When no Set Local Name command is pending, the change did not come
 * from mgmt, so the stored device name is refreshed here; if it
 * happened as part of powering on (pending Set Powered), no events
 * are emitted at all.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the issuing socket; it gets a command response instead */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10033 
has_uuid(u8 * uuid,u16 uuid_count,u8 (* uuids)[16])10034 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10035 {
10036 	int i;
10037 
10038 	for (i = 0; i < uuid_count; i++) {
10039 		if (!memcmp(uuid, uuids[i], 16))
10040 			return true;
10041 	}
10042 
10043 	return false;
10044 }
10045 
/* Check whether any UUID from @uuids appears in the EIR/advertising
 * data. 16-bit and 32-bit UUIDs found in the data are expanded into
 * 128-bit form on top of the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field */
		if (eir_len - parsed < field_len + 1)
			break;

		/* eir[1] is the AD type; data starts at eir[2] */
		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* 16-bit UUID replaces bytes 12-13 */
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* 32-bit UUID replaces bytes 12-15 */
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past the length byte plus the field itself */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10100 
/* Queue a delayed restart of an active LE scan (used when strict
 * duplicate filtering hides RSSI updates), unless the scan window
 * would expire before the restart delay elapses anyway.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when the remaining scan time is shorter
	 * than the restart delay.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
10115 
/* Apply the active service discovery filter (RSSI threshold and UUID
 * list) to a discovered device. Returns false when the result must be
 * dropped instead of being reported to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10160 
/* Tell userspace that a device matched by the advertisement monitor
 * identified by @handle is no longer being seen.
 */
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.addr.type = addr_type;
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}
10173 
/* Build and broadcast an ADV_MONITOR_DEVICE_FOUND event for monitor
 * @handle from an existing DEVICE_FOUND event skb. The source @skb is
 * not consumed; the caller remains responsible for it.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* New event size = old payload + the extra header fields of
	 * mgmt_ev_adv_monitor_device_found (the monitor handle).
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10203 
/* Route a DEVICE_FOUND event skb to the appropriate event(s) based on
 * why the advertisement was received (discovery, passive scanning, or
 * an Advertisement Monitor match). Always consumes @skb: it is either
 * sent as a DEVICE_FOUND event or freed.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	/* Notify the matched monitor once per device; recompute whether
	 * any monitored device still awaits its first notification.
	 */
	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Finally consume the skb: send it as DEVICE_FOUND or drop it */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10267 
/* Emit a Mesh Device Found event for an LE advertisement, but only if
 * the advertising data or scan response contains at least one of the
 * AD types registered in hdev->mesh_ad_types. An empty filter list
 * (first entry zero) accepts every advertisement.
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* i walks AD structures: eir[i] = length, eir[i+1] = type */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* A zero entry ends the registered list */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No AD type of interest found; drop the report silently */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10333 
/* Report a discovered device to userspace.
 *
 * Builds a DEVICE_FOUND event from the inquiry/advertising report,
 * applying the service discovery filter and the limited-discovery
 * check, then hands the skb to mgmt_adv_monitor_device_found() which
 * decides between DEVICE_FOUND and ADV_MONITOR_DEVICE_FOUND delivery.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Mesh gets a copy of every LE report, independent of the rest */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a Class of Device EIR field when the report did
	 * not already contain one but a CoD is known.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Takes ownership of skb */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10425 
/* Send a Device Found event carrying the outcome of a remote name
 * request. When @name is NULL the request failed, and the event is
 * flagged with MGMT_DEV_FOUND_NAME_REQUEST_FAILED instead of carrying
 * a name EIR field.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
	/* mgmt_alloc_skb() can fail; every other caller in this file
	 * checks for NULL before calling skb_put(), so do the same here
	 * instead of dereferencing a NULL skb.
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}
10452 
/* Broadcast a Discovering event reflecting the new discovery state. */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev = {
		.type = hdev->discovery.type,
		.discovering = discovering,
	};

	bt_dev_dbg(hdev, "discovering %u", discovering);

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
10465 
/* Broadcast a Controller Suspend event with the given suspend state. */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev = {
		.suspend_state = state,
	};

	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
10473 
/* Broadcast a Controller Resume event. A NULL @bdaddr results in an
 * all-zero address field in the event.
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;

	if (!bdaddr) {
		memset(&ev.addr, 0, sizeof(ev.addr));
	} else {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
10489 
/* The mgmt control channel: command handler table plus the per-hdev
 * init hook, registered with the HCI socket layer in mgmt_init().
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10496 
/* Register the mgmt control channel with the HCI socket layer. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10501 
/* Unregister the mgmt control channel from the HCI socket layer. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10506 
mgmt_cleanup(struct sock * sk)10507 void mgmt_cleanup(struct sock *sk)
10508 {
10509 	struct mgmt_mesh_tx *mesh_tx;
10510 	struct hci_dev *hdev;
10511 
10512 	read_lock(&hci_dev_list_lock);
10513 
10514 	list_for_each_entry(hdev, &hci_dev_list, list) {
10515 		do {
10516 			mesh_tx = mgmt_mesh_next(hdev, sk);
10517 
10518 			if (mesh_tx)
10519 				mesh_send_complete(hdev, mesh_tx, true);
10520 		} while (mesh_tx);
10521 	}
10522 
10523 	read_unlock(&hci_dev_list_lock);
10524 }
10525