xref: /openbmc/linux/net/bluetooth/mgmt.c (revision 1e9a69f2)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* Opcodes accepted from trusted management sockets; this exact order is
 * what MGMT_OP_READ_COMMANDS reports back to userspace.
 */
47 static const u16 mgmt_commands[] = {
48 	MGMT_OP_READ_INDEX_LIST,
49 	MGMT_OP_READ_INFO,
50 	MGMT_OP_SET_POWERED,
51 	MGMT_OP_SET_DISCOVERABLE,
52 	MGMT_OP_SET_CONNECTABLE,
53 	MGMT_OP_SET_FAST_CONNECTABLE,
54 	MGMT_OP_SET_BONDABLE,
55 	MGMT_OP_SET_LINK_SECURITY,
56 	MGMT_OP_SET_SSP,
57 	MGMT_OP_SET_HS,
58 	MGMT_OP_SET_LE,
59 	MGMT_OP_SET_DEV_CLASS,
60 	MGMT_OP_SET_LOCAL_NAME,
61 	MGMT_OP_ADD_UUID,
62 	MGMT_OP_REMOVE_UUID,
63 	MGMT_OP_LOAD_LINK_KEYS,
64 	MGMT_OP_LOAD_LONG_TERM_KEYS,
65 	MGMT_OP_DISCONNECT,
66 	MGMT_OP_GET_CONNECTIONS,
67 	MGMT_OP_PIN_CODE_REPLY,
68 	MGMT_OP_PIN_CODE_NEG_REPLY,
69 	MGMT_OP_SET_IO_CAPABILITY,
70 	MGMT_OP_PAIR_DEVICE,
71 	MGMT_OP_CANCEL_PAIR_DEVICE,
72 	MGMT_OP_UNPAIR_DEVICE,
73 	MGMT_OP_USER_CONFIRM_REPLY,
74 	MGMT_OP_USER_CONFIRM_NEG_REPLY,
75 	MGMT_OP_USER_PASSKEY_REPLY,
76 	MGMT_OP_USER_PASSKEY_NEG_REPLY,
77 	MGMT_OP_READ_LOCAL_OOB_DATA,
78 	MGMT_OP_ADD_REMOTE_OOB_DATA,
79 	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
80 	MGMT_OP_START_DISCOVERY,
81 	MGMT_OP_STOP_DISCOVERY,
82 	MGMT_OP_CONFIRM_NAME,
83 	MGMT_OP_BLOCK_DEVICE,
84 	MGMT_OP_UNBLOCK_DEVICE,
85 	MGMT_OP_SET_DEVICE_ID,
86 	MGMT_OP_SET_ADVERTISING,
87 	MGMT_OP_SET_BREDR,
88 	MGMT_OP_SET_STATIC_ADDRESS,
89 	MGMT_OP_SET_SCAN_PARAMS,
90 	MGMT_OP_SET_SECURE_CONN,
91 	MGMT_OP_SET_DEBUG_KEYS,
92 	MGMT_OP_SET_PRIVACY,
93 	MGMT_OP_LOAD_IRKS,
94 	MGMT_OP_GET_CONN_INFO,
95 	MGMT_OP_GET_CLOCK_INFO,
96 	MGMT_OP_ADD_DEVICE,
97 	MGMT_OP_REMOVE_DEVICE,
98 	MGMT_OP_LOAD_CONN_PARAM,
99 	MGMT_OP_READ_UNCONF_INDEX_LIST,
100 	MGMT_OP_READ_CONFIG_INFO,
101 	MGMT_OP_SET_EXTERNAL_CONFIG,
102 	MGMT_OP_SET_PUBLIC_ADDRESS,
103 	MGMT_OP_START_SERVICE_DISCOVERY,
104 	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
105 	MGMT_OP_READ_EXT_INDEX_LIST,
106 	MGMT_OP_READ_ADV_FEATURES,
107 	MGMT_OP_ADD_ADVERTISING,
108 	MGMT_OP_REMOVE_ADVERTISING,
109 	MGMT_OP_GET_ADV_SIZE_INFO,
110 	MGMT_OP_START_LIMITED_DISCOVERY,
111 	MGMT_OP_READ_EXT_INFO,
112 	MGMT_OP_SET_APPEARANCE,
113 	MGMT_OP_GET_PHY_CONFIGURATION,
114 	MGMT_OP_SET_PHY_CONFIGURATION,
115 	MGMT_OP_SET_BLOCKED_KEYS,
116 	MGMT_OP_SET_WIDEBAND_SPEECH,
117 	MGMT_OP_READ_CONTROLLER_CAP,
118 	MGMT_OP_READ_EXP_FEATURES_INFO,
119 	MGMT_OP_SET_EXP_FEATURE,
120 	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
121 	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
122 	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
123 	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
124 	MGMT_OP_GET_DEVICE_FLAGS,
125 	MGMT_OP_SET_DEVICE_FLAGS,
126 	MGMT_OP_READ_ADV_MONITOR_FEATURES,
127 	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
128 	MGMT_OP_REMOVE_ADV_MONITOR,
129 	MGMT_OP_ADD_EXT_ADV_PARAMS,
130 	MGMT_OP_ADD_EXT_ADV_DATA,
131 	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
132 	MGMT_OP_SET_MESH_RECEIVER,
133 	MGMT_OP_MESH_READ_FEATURES,
134 	MGMT_OP_MESH_SEND,
135 	MGMT_OP_MESH_SEND_CANCEL,
136 };
137 
/* Events delivered to trusted management sockets; this order is what
 * MGMT_OP_READ_COMMANDS reports in its event list.
 */
138 static const u16 mgmt_events[] = {
139 	MGMT_EV_CONTROLLER_ERROR,
140 	MGMT_EV_INDEX_ADDED,
141 	MGMT_EV_INDEX_REMOVED,
142 	MGMT_EV_NEW_SETTINGS,
143 	MGMT_EV_CLASS_OF_DEV_CHANGED,
144 	MGMT_EV_LOCAL_NAME_CHANGED,
145 	MGMT_EV_NEW_LINK_KEY,
146 	MGMT_EV_NEW_LONG_TERM_KEY,
147 	MGMT_EV_DEVICE_CONNECTED,
148 	MGMT_EV_DEVICE_DISCONNECTED,
149 	MGMT_EV_CONNECT_FAILED,
150 	MGMT_EV_PIN_CODE_REQUEST,
151 	MGMT_EV_USER_CONFIRM_REQUEST,
152 	MGMT_EV_USER_PASSKEY_REQUEST,
153 	MGMT_EV_AUTH_FAILED,
154 	MGMT_EV_DEVICE_FOUND,
155 	MGMT_EV_DISCOVERING,
156 	MGMT_EV_DEVICE_BLOCKED,
157 	MGMT_EV_DEVICE_UNBLOCKED,
158 	MGMT_EV_DEVICE_UNPAIRED,
159 	MGMT_EV_PASSKEY_NOTIFY,
160 	MGMT_EV_NEW_IRK,
161 	MGMT_EV_NEW_CSRK,
162 	MGMT_EV_DEVICE_ADDED,
163 	MGMT_EV_DEVICE_REMOVED,
164 	MGMT_EV_NEW_CONN_PARAM,
165 	MGMT_EV_UNCONF_INDEX_ADDED,
166 	MGMT_EV_UNCONF_INDEX_REMOVED,
167 	MGMT_EV_NEW_CONFIG_OPTIONS,
168 	MGMT_EV_EXT_INDEX_ADDED,
169 	MGMT_EV_EXT_INDEX_REMOVED,
170 	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
171 	MGMT_EV_ADVERTISING_ADDED,
172 	MGMT_EV_ADVERTISING_REMOVED,
173 	MGMT_EV_EXT_INFO_CHANGED,
174 	MGMT_EV_PHY_CONFIGURATION_CHANGED,
175 	MGMT_EV_EXP_FEATURE_CHANGED,
176 	MGMT_EV_DEVICE_FLAGS_CHANGED,
177 	MGMT_EV_ADV_MONITOR_ADDED,
178 	MGMT_EV_ADV_MONITOR_REMOVED,
179 	MGMT_EV_CONTROLLER_SUSPEND,
180 	MGMT_EV_CONTROLLER_RESUME,
181 	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
182 	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
183 };
184 
/* Subset of opcodes permitted for untrusted (non-privileged) sockets;
 * read-only operations only.
 */
185 static const u16 mgmt_untrusted_commands[] = {
186 	MGMT_OP_READ_INDEX_LIST,
187 	MGMT_OP_READ_INFO,
188 	MGMT_OP_READ_UNCONF_INDEX_LIST,
189 	MGMT_OP_READ_CONFIG_INFO,
190 	MGMT_OP_READ_EXT_INDEX_LIST,
191 	MGMT_OP_READ_EXT_INFO,
192 	MGMT_OP_READ_CONTROLLER_CAP,
193 	MGMT_OP_READ_EXP_FEATURES_INFO,
194 	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
195 	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
196 };
197 
/* Subset of events delivered to untrusted sockets; excludes anything
 * that could leak keys, addresses or pairing material.
 */
198 static const u16 mgmt_untrusted_events[] = {
199 	MGMT_EV_INDEX_ADDED,
200 	MGMT_EV_INDEX_REMOVED,
201 	MGMT_EV_NEW_SETTINGS,
202 	MGMT_EV_CLASS_OF_DEV_CHANGED,
203 	MGMT_EV_LOCAL_NAME_CHANGED,
204 	MGMT_EV_UNCONF_INDEX_ADDED,
205 	MGMT_EV_UNCONF_INDEX_REMOVED,
206 	MGMT_EV_NEW_CONFIG_OPTIONS,
207 	MGMT_EV_EXT_INDEX_ADDED,
208 	MGMT_EV_EXT_INDEX_REMOVED,
209 	MGMT_EV_EXT_INFO_CHANGED,
210 	MGMT_EV_EXP_FEATURE_CHANGED,
211 };
212 
213 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
214 
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217 
218 /* HCI to MGMT error code conversion table */
/* Indexed directly by HCI status code (0x00..0x43); see mgmt_status()
 * for the bounds-checked lookup. Each entry's comment names the HCI
 * error it translates.
 */
219 static const u8 mgmt_status_table[] = {
220 	MGMT_STATUS_SUCCESS,
221 	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
222 	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
223 	MGMT_STATUS_FAILED,		/* Hardware Failure */
224 	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
225 	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
226 	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
227 	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
228 	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
229 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
230 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
231 	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
232 	MGMT_STATUS_BUSY,		/* Command Disallowed */
233 	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
234 	MGMT_STATUS_REJECTED,		/* Rejected Security */
235 	MGMT_STATUS_REJECTED,		/* Rejected Personal */
236 	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
237 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
238 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
239 	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
240 	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
241 	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
242 	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
243 	MGMT_STATUS_BUSY,		/* Repeated Attempts */
244 	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
245 	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
246 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
247 	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
248 	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
249 	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
250 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
251 	MGMT_STATUS_FAILED,		/* Unspecified Error */
252 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
253 	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
254 	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
255 	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
256 	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
257 	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
258 	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
259 	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
260 	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
261 	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
262 	MGMT_STATUS_FAILED,		/* Transaction Collision */
263 	MGMT_STATUS_FAILED,		/* Reserved for future use */
264 	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
265 	MGMT_STATUS_REJECTED,		/* QoS Rejected */
266 	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
267 	MGMT_STATUS_REJECTED,		/* Insufficient Security */
268 	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
269 	MGMT_STATUS_FAILED,		/* Reserved for future use */
270 	MGMT_STATUS_BUSY,		/* Role Switch Pending */
271 	MGMT_STATUS_FAILED,		/* Reserved for future use */
272 	MGMT_STATUS_FAILED,		/* Slot Violation */
273 	MGMT_STATUS_FAILED,		/* Role Switch Failed */
274 	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
275 	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
276 	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
277 	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
278 	MGMT_STATUS_BUSY,		/* Controller Busy */
279 	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
280 	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
281 	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
282 	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
283 	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
284 };
285 
mgmt_errno_status(int err)286 static u8 mgmt_errno_status(int err)
287 {
288 	switch (err) {
289 	case 0:
290 		return MGMT_STATUS_SUCCESS;
291 	case -EPERM:
292 		return MGMT_STATUS_REJECTED;
293 	case -EINVAL:
294 		return MGMT_STATUS_INVALID_PARAMS;
295 	case -EOPNOTSUPP:
296 		return MGMT_STATUS_NOT_SUPPORTED;
297 	case -EBUSY:
298 		return MGMT_STATUS_BUSY;
299 	case -ETIMEDOUT:
300 		return MGMT_STATUS_AUTH_FAILED;
301 	case -ENOMEM:
302 		return MGMT_STATUS_NO_RESOURCES;
303 	case -EISCONN:
304 		return MGMT_STATUS_ALREADY_CONNECTED;
305 	case -ENOTCONN:
306 		return MGMT_STATUS_DISCONNECTED;
307 	}
308 
309 	return MGMT_STATUS_FAILED;
310 }
311 
mgmt_status(int err)312 static u8 mgmt_status(int err)
313 {
314 	if (err < 0)
315 		return mgmt_errno_status(err);
316 
317 	if (err < ARRAY_SIZE(mgmt_status_table))
318 		return mgmt_status_table[err];
319 
320 	return MGMT_STATUS_FAILED;
321 }
322 
/* Broadcast an index-related event on the control channel to all
 * sockets that have @flag set; no socket is skipped.
 */
mgmt_index_event(u16 event,struct hci_dev * hdev,void * data,u16 len,int flag)323 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
324 			    u16 len, int flag)
325 {
326 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
327 			       flag, NULL);
328 }
329 
/* Broadcast an event on the control channel to sockets with @flag set,
 * optionally skipping @skip_sk (typically the command originator).
 */
mgmt_limited_event(u16 event,struct hci_dev * hdev,void * data,u16 len,int flag,struct sock * skip_sk)330 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
331 			      u16 len, int flag, struct sock *skip_sk)
332 {
333 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
334 			       flag, skip_sk);
335 }
336 
/* Broadcast an event on the control channel to trusted sockets only,
 * optionally skipping @skip_sk.
 */
mgmt_event(u16 event,struct hci_dev * hdev,void * data,u16 len,struct sock * skip_sk)337 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
338 		      struct sock *skip_sk)
339 {
340 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341 			       HCI_SOCK_TRUSTED, skip_sk);
342 }
343 
/* Broadcast a pre-built event skb on the control channel to trusted
 * sockets, optionally skipping @skip_sk.
 */
mgmt_event_skb(struct sk_buff * skb,struct sock * skip_sk)344 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
345 {
346 	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
347 				   skip_sk);
348 }
349 
/* Map a MGMT LE address type to the HCI LE address type. Anything that
 * is not BDADDR_LE_PUBLIC is treated as a random address.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return mgmt_addr_type == BDADDR_LE_PUBLIC ? ADDR_LE_DEV_PUBLIC :
						    ADDR_LE_DEV_RANDOM;
}
357 
mgmt_fill_version_info(void * ver)358 void mgmt_fill_version_info(void *ver)
359 {
360 	struct mgmt_rp_read_version *rp = ver;
361 
362 	rp->version = MGMT_VERSION;
363 	rp->revision = cpu_to_le16(MGMT_REVISION);
364 }
365 
/* Handle MGMT_OP_READ_VERSION: reply with the management interface
 * version/revision. @hdev is NULL-index scoped (MGMT_INDEX_NONE).
 */
read_version(struct sock * sk,struct hci_dev * hdev,void * data,u16 data_len)366 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
367 			u16 data_len)
368 {
369 	struct mgmt_rp_read_version rp;
370 
371 	bt_dev_dbg(hdev, "sock %p", sk);
372 
373 	mgmt_fill_version_info(&rp);
374 
375 	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
376 				 &rp, sizeof(rp));
377 }
378 
/* Handle MGMT_OP_READ_COMMANDS: reply with the list of supported
 * command opcodes followed by the list of supported event codes.
 * Untrusted sockets only see the read-only subsets.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *cmds, *evts;
	u16 num_commands, num_events;
	size_t rp_size;
	__le16 *opcode;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pick the command/event tables matching the socket's trust level. */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		cmds = mgmt_commands;
		evts = mgmt_events;
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		cmds = mgmt_untrusted_commands;
		evts = mgmt_untrusted_events;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Opcodes are packed little-endian: all commands, then all events. */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(cmds[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(evts[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
430 
/* Handle MGMT_OP_READ_INDEX_LIST: reply with the indexes of all
 * configured primary controllers. Two passes over hci_dev_list are
 * made under the read lock: the first computes an upper bound for the
 * allocation, the second fills in only the entries that also pass the
 * SETUP/CONFIG/USER_CHANNEL/raw-only filters, so the final count may
 * be smaller and rp_len is recomputed from it.
 */
read_index_list(struct sock * sk,struct hci_dev * hdev,void * data,u16 data_len)431 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
432 			   u16 data_len)
433 {
434 	struct mgmt_rp_read_index_list *rp;
435 	struct hci_dev *d;
436 	size_t rp_len;
437 	u16 count;
438 	int err;
439 
440 	bt_dev_dbg(hdev, "sock %p", sk);
441 
442 	read_lock(&hci_dev_list_lock);
443 
444 	count = 0;
445 	list_for_each_entry(d, &hci_dev_list, list) {
446 		if (d->dev_type == HCI_PRIMARY &&
447 		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
448 			count++;
449 	}
450 
451 	rp_len = sizeof(*rp) + (2 * count);
452 	/* GFP_ATOMIC: allocation happens while holding the read lock. */
453 	rp = kmalloc(rp_len, GFP_ATOMIC);
454 	if (!rp) {
455 		read_unlock(&hci_dev_list_lock);
456 		return -ENOMEM;
457 	}
458 
459 	count = 0;
460 	list_for_each_entry(d, &hci_dev_list, list) {
461 		if (hci_dev_test_flag(d, HCI_SETUP) ||
462 		    hci_dev_test_flag(d, HCI_CONFIG) ||
463 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
464 			continue;
465 
466 		/* Devices marked as raw-only are neither configured
467 		 * nor unconfigured controllers.
468 		 */
469 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
470 			continue;
471 
472 		if (d->dev_type == HCI_PRIMARY &&
473 		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
474 			rp->index[count++] = cpu_to_le16(d->id);
475 			bt_dev_dbg(hdev, "Added hci%u", d->id);
476 		}
477 	}
478 
479 	rp->num_controllers = cpu_to_le16(count);
480 	rp_len = sizeof(*rp) + (2 * count);
481 
482 	read_unlock(&hci_dev_list_lock);
483 
484 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
485 				0, rp, rp_len);
486 
487 	kfree(rp);
488 
489 	return err;
490 }
490 
/* Handle MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list() but
 * reports primary controllers that still have HCI_UNCONFIGURED set.
 * Same two-pass count-then-fill pattern under the read lock; the first
 * count is only an upper bound for the allocation.
 */
read_unconf_index_list(struct sock * sk,struct hci_dev * hdev,void * data,u16 data_len)491 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
492 				  void *data, u16 data_len)
493 {
494 	struct mgmt_rp_read_unconf_index_list *rp;
495 	struct hci_dev *d;
496 	size_t rp_len;
497 	u16 count;
498 	int err;
499 
500 	bt_dev_dbg(hdev, "sock %p", sk);
501 
502 	read_lock(&hci_dev_list_lock);
503 
504 	count = 0;
505 	list_for_each_entry(d, &hci_dev_list, list) {
506 		if (d->dev_type == HCI_PRIMARY &&
507 		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
508 			count++;
509 	}
510 
511 	rp_len = sizeof(*rp) + (2 * count);
512 	/* GFP_ATOMIC: allocation happens while holding the read lock. */
513 	rp = kmalloc(rp_len, GFP_ATOMIC);
514 	if (!rp) {
515 		read_unlock(&hci_dev_list_lock);
516 		return -ENOMEM;
517 	}
518 
519 	count = 0;
520 	list_for_each_entry(d, &hci_dev_list, list) {
521 		if (hci_dev_test_flag(d, HCI_SETUP) ||
522 		    hci_dev_test_flag(d, HCI_CONFIG) ||
523 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
524 			continue;
525 
526 		/* Devices marked as raw-only are neither configured
527 		 * nor unconfigured controllers.
528 		 */
529 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
530 			continue;
531 
532 		if (d->dev_type == HCI_PRIMARY &&
533 		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
534 			rp->index[count++] = cpu_to_le16(d->id);
535 			bt_dev_dbg(hdev, "Added hci%u", d->id);
536 		}
537 	}
538 
539 	rp->num_controllers = cpu_to_le16(count);
540 	rp_len = sizeof(*rp) + (2 * count);
541 
542 	read_unlock(&hci_dev_list_lock);
543 
544 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
545 				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
546 
547 	kfree(rp);
548 
549 	return err;
550 }
550 
/* Handle MGMT_OP_READ_EXT_INDEX_LIST: reply with all primary and AMP
 * controllers, tagging each entry with a type (0x00 configured primary,
 * 0x01 unconfigured primary, 0x02 AMP) and its bus. Uses the same
 * two-pass count-then-fill pattern under the read lock as
 * read_index_list(); the first count is an upper bound.
 * As a side effect, switches this socket over to extended index events.
 */
read_ext_index_list(struct sock * sk,struct hci_dev * hdev,void * data,u16 data_len)551 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
552 			       void *data, u16 data_len)
553 {
554 	struct mgmt_rp_read_ext_index_list *rp;
555 	struct hci_dev *d;
556 	u16 count;
557 	int err;
558 
559 	bt_dev_dbg(hdev, "sock %p", sk);
560 
561 	read_lock(&hci_dev_list_lock);
562 
563 	count = 0;
564 	list_for_each_entry(d, &hci_dev_list, list) {
565 		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
566 			count++;
567 	}
568 
569 	/* GFP_ATOMIC: allocation happens while holding the read lock. */
570 	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
571 	if (!rp) {
572 		read_unlock(&hci_dev_list_lock);
573 		return -ENOMEM;
574 	}
575 
576 	count = 0;
577 	list_for_each_entry(d, &hci_dev_list, list) {
578 		if (hci_dev_test_flag(d, HCI_SETUP) ||
579 		    hci_dev_test_flag(d, HCI_CONFIG) ||
580 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
581 			continue;
582 
583 		/* Devices marked as raw-only are neither configured
584 		 * nor unconfigured controllers.
585 		 */
586 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
587 			continue;
588 
589 		if (d->dev_type == HCI_PRIMARY) {
590 			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
591 				rp->entry[count].type = 0x01;
592 			else
593 				rp->entry[count].type = 0x00;
594 		} else if (d->dev_type == HCI_AMP) {
595 			rp->entry[count].type = 0x02;
596 		} else {
597 			continue;
598 		}
599 
600 		rp->entry[count].bus = d->bus;
601 		rp->entry[count++].index = cpu_to_le16(d->id);
602 		bt_dev_dbg(hdev, "Added hci%u", d->id);
603 	}
604 
605 	rp->num_controllers = cpu_to_le16(count);
606 
607 	read_unlock(&hci_dev_list_lock);
608 
609 	/* If this command is called at least once, then all the
610 	 * default index and unconfigured index events are disabled
611 	 * and from now on only extended index events are used.
612 	 */
613 	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
614 	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
615 	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
616 
617 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
618 				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
619 				struct_size(rp, entry, count));
620 
621 	kfree(rp);
622 
623 	return err;
624 }
624 
is_configured(struct hci_dev * hdev)625 static bool is_configured(struct hci_dev *hdev)
626 {
627 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
628 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
629 		return false;
630 
631 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
632 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
633 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
634 		return false;
635 
636 	return true;
637 }
638 
get_missing_options(struct hci_dev * hdev)639 static __le32 get_missing_options(struct hci_dev *hdev)
640 {
641 	u32 options = 0;
642 
643 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
644 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
645 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
646 
647 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
648 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
649 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
650 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
651 
652 	return cpu_to_le32(options);
653 }
654 
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-options
 * mask to sockets subscribed to option events, skipping @skip.
 */
new_options(struct hci_dev * hdev,struct sock * skip)655 static int new_options(struct hci_dev *hdev, struct sock *skip)
656 {
657 	__le32 options = get_missing_options(hdev);
658 
659 	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
660 				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
661 }
662 
/* Complete @opcode with the current missing-options mask as the reply
 * payload (used by the external-config/public-address setters).
 */
send_options_rsp(struct sock * sk,u16 opcode,struct hci_dev * hdev)663 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
664 {
665 	__le32 options = get_missing_options(hdev);
666 
667 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
668 				 sizeof(options));
669 }
670 
/* Handle MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer ID plus
 * which configuration options this controller supports and which are
 * still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 supported = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* memset (not an initializer) so implicit padding is zeroed before
	 * the struct is copied to userspace.
	 */
	memset(&rp, 0, sizeof(rp));

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		supported |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address can only be configured if the driver allows it. */
	if (hdev->set_bdaddr)
		supported |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
	rp.supported_options = cpu_to_le32(supported);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
698 
/* Build the MGMT_PHY_* bitmask of PHYs this controller supports,
 * derived from LMP feature bits (BR/EDR) and LE feature bits.
 * EDR 3-slot/5-slot bits are nested because multi-slot EDR only makes
 * sense when the corresponding 2M/3M rate is supported at all.
 */
get_supported_phys(struct hci_dev * hdev)699 static u32 get_supported_phys(struct hci_dev *hdev)
700 {
701 	u32 supported_phys = 0;
702 
703 	if (lmp_bredr_capable(hdev)) {
704 		/* Basic rate, single slot is mandatory for BR/EDR. */
705 		supported_phys |= MGMT_PHY_BR_1M_1SLOT;
706 
707 		if (hdev->features[0][0] & LMP_3SLOT)
708 			supported_phys |= MGMT_PHY_BR_1M_3SLOT;
709 
710 		if (hdev->features[0][0] & LMP_5SLOT)
711 			supported_phys |= MGMT_PHY_BR_1M_5SLOT;
712 
713 		if (lmp_edr_2m_capable(hdev)) {
714 			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
715 
716 			if (lmp_edr_3slot_capable(hdev))
717 				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
718 
719 			if (lmp_edr_5slot_capable(hdev))
720 				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
721 
722 			if (lmp_edr_3m_capable(hdev)) {
723 				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
724 
725 				if (lmp_edr_3slot_capable(hdev))
726 					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
727 
728 				if (lmp_edr_5slot_capable(hdev))
729 					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
730 			}
731 		}
732 	}
733 
734 	if (lmp_le_capable(hdev)) {
735 		/* LE 1M is mandatory for any LE controller. */
736 		supported_phys |= MGMT_PHY_LE_1M_TX;
737 		supported_phys |= MGMT_PHY_LE_1M_RX;
738 
739 		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
740 			supported_phys |= MGMT_PHY_LE_2M_TX;
741 			supported_phys |= MGMT_PHY_LE_2M_RX;
742 		}
743 
744 		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
745 			supported_phys |= MGMT_PHY_LE_CODED_TX;
746 			supported_phys |= MGMT_PHY_LE_CODED_RX;
747 		}
748 	}
749 
750 	return supported_phys;
751 }
750 
/* Build the MGMT_PHY_* bitmask of PHYs currently selected. For BR/EDR,
 * selection is read from hdev->pkt_type; note the EDR bits are inverted
 * there (HCI_2DHx/HCI_3DHx set means the packet type is *excluded*),
 * hence the !(pkt_type & ...) tests. LE selection comes from the
 * default TX/RX PHY preference masks.
 */
get_selected_phys(struct hci_dev * hdev)751 static u32 get_selected_phys(struct hci_dev *hdev)
752 {
753 	u32 selected_phys = 0;
754 
755 	if (lmp_bredr_capable(hdev)) {
756 		selected_phys |= MGMT_PHY_BR_1M_1SLOT;
757 
758 		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
759 			selected_phys |= MGMT_PHY_BR_1M_3SLOT;
760 
761 		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
762 			selected_phys |= MGMT_PHY_BR_1M_5SLOT;
763 
764 		if (lmp_edr_2m_capable(hdev)) {
765 			if (!(hdev->pkt_type & HCI_2DH1))
766 				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
767 
768 			if (lmp_edr_3slot_capable(hdev) &&
769 			    !(hdev->pkt_type & HCI_2DH3))
770 				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
771 
772 			if (lmp_edr_5slot_capable(hdev) &&
773 			    !(hdev->pkt_type & HCI_2DH5))
774 				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
775 
776 			if (lmp_edr_3m_capable(hdev)) {
777 				if (!(hdev->pkt_type & HCI_3DH1))
778 					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
779 
780 				if (lmp_edr_3slot_capable(hdev) &&
781 				    !(hdev->pkt_type & HCI_3DH3))
782 					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
783 
784 				if (lmp_edr_5slot_capable(hdev) &&
785 				    !(hdev->pkt_type & HCI_3DH5))
786 					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
787 			}
788 		}
789 	}
790 
791 	if (lmp_le_capable(hdev)) {
792 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
793 			selected_phys |= MGMT_PHY_LE_1M_TX;
794 
795 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
796 			selected_phys |= MGMT_PHY_LE_1M_RX;
797 
798 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
799 			selected_phys |= MGMT_PHY_LE_2M_TX;
800 
801 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
802 			selected_phys |= MGMT_PHY_LE_2M_RX;
803 
804 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
805 			selected_phys |= MGMT_PHY_LE_CODED_TX;
806 
807 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
808 			selected_phys |= MGMT_PHY_LE_CODED_RX;
809 	}
810 
811 	return selected_phys;
812 }
813 
get_configurable_phys(struct hci_dev * hdev)814 static u32 get_configurable_phys(struct hci_dev *hdev)
815 {
816 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
817 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
818 }
819 
get_supported_settings(struct hci_dev * hdev)820 static u32 get_supported_settings(struct hci_dev *hdev)
821 {
822 	u32 settings = 0;
823 
824 	settings |= MGMT_SETTING_POWERED;
825 	settings |= MGMT_SETTING_BONDABLE;
826 	settings |= MGMT_SETTING_DEBUG_KEYS;
827 	settings |= MGMT_SETTING_CONNECTABLE;
828 	settings |= MGMT_SETTING_DISCOVERABLE;
829 
830 	if (lmp_bredr_capable(hdev)) {
831 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
832 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
833 		settings |= MGMT_SETTING_BREDR;
834 		settings |= MGMT_SETTING_LINK_SECURITY;
835 
836 		if (lmp_ssp_capable(hdev)) {
837 			settings |= MGMT_SETTING_SSP;
838 		}
839 
840 		if (lmp_sc_capable(hdev))
841 			settings |= MGMT_SETTING_SECURE_CONN;
842 
843 		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
844 			     &hdev->quirks))
845 			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
846 	}
847 
848 	if (lmp_le_capable(hdev)) {
849 		settings |= MGMT_SETTING_LE;
850 		settings |= MGMT_SETTING_SECURE_CONN;
851 		settings |= MGMT_SETTING_PRIVACY;
852 		settings |= MGMT_SETTING_STATIC_ADDRESS;
853 		settings |= MGMT_SETTING_ADVERTISING;
854 	}
855 
856 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
857 	    hdev->set_bdaddr)
858 		settings |= MGMT_SETTING_CONFIGURATION;
859 
860 	if (cis_central_capable(hdev))
861 		settings |= MGMT_SETTING_CIS_CENTRAL;
862 
863 	if (cis_peripheral_capable(hdev))
864 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
865 
866 	settings |= MGMT_SETTING_PHY_CONFIGURATION;
867 
868 	return settings;
869 }
870 
/* Build the bitmask of settings currently active on @hdev, mapping the
 * HCI dev flags (and a few capability checks) to MGMT_SETTING_* bits.
 * This is the payload of MGMT_EV_NEW_SETTINGS and of most command
 * replies that return the settings.
 */
get_current_settings(struct hci_dev * hdev)871 static u32 get_current_settings(struct hci_dev *hdev)
872 {
873 	u32 settings = 0;
874 
875 	if (hdev_is_powered(hdev))
876 		settings |= MGMT_SETTING_POWERED;
877 
878 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
879 		settings |= MGMT_SETTING_CONNECTABLE;
880 
881 	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
882 		settings |= MGMT_SETTING_FAST_CONNECTABLE;
883 
884 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
885 		settings |= MGMT_SETTING_DISCOVERABLE;
886 
887 	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
888 		settings |= MGMT_SETTING_BONDABLE;
889 
890 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
891 		settings |= MGMT_SETTING_BREDR;
892 
893 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
894 		settings |= MGMT_SETTING_LE;
895 
896 	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
897 		settings |= MGMT_SETTING_LINK_SECURITY;
898 
899 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
900 		settings |= MGMT_SETTING_SSP;
901 
902 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
903 		settings |= MGMT_SETTING_ADVERTISING;
904 
905 	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
906 		settings |= MGMT_SETTING_SECURE_CONN;
907 
908 	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
909 		settings |= MGMT_SETTING_DEBUG_KEYS;
910 
911 	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
912 		settings |= MGMT_SETTING_PRIVACY;
913 
914 	/* The current setting for static address has two purposes. The
915 	 * first is to indicate if the static address will be used and
916 	 * the second is to indicate if it is actually set.
917 	 *
918 	 * This means if the static address is not configured, this flag
919 	 * will never be set. If the address is configured, then if the
920 	 * address is actually used decides if the flag is set or not.
921 	 *
922 	 * For single mode LE only controllers and dual-mode controllers
923 	 * with BR/EDR disabled, the existence of the static address will
924 	 * be evaluated.
925 	 */
926 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
927 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
928 	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
929 		if (bacmp(&hdev->static_addr, BDADDR_ANY))
930 			settings |= MGMT_SETTING_STATIC_ADDRESS;
931 	}
932 
933 	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
934 		settings |= MGMT_SETTING_WIDEBAND_SPEECH;
935 
936 	if (cis_central_capable(hdev))
937 		settings |= MGMT_SETTING_CIS_CENTRAL;
938 
939 	if (cis_peripheral_capable(hdev))
940 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
941 
942 	if (bis_capable(hdev))
943 		settings |= MGMT_SETTING_ISO_BROADCASTER;
944 
945 	if (sync_recv_capable(hdev))
946 		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
947 
948 	return settings;
949 }
950 
/* Look up a pending management command for @opcode on the control
 * channel; returns NULL if none is outstanding.
 */
pending_find(u16 opcode,struct hci_dev * hdev)951 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
952 {
953 	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
954 }
955 
mgmt_get_adv_discov_flags(struct hci_dev * hdev)956 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
957 {
958 	struct mgmt_pending_cmd *cmd;
959 
960 	/* If there's a pending mgmt command the flags will not yet have
961 	 * their final values, so check for this first.
962 	 */
963 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
964 	if (cmd) {
965 		struct mgmt_mode *cp = cmd->param;
966 		if (cp->val == 0x01)
967 			return LE_AD_GENERAL;
968 		else if (cp->val == 0x02)
969 			return LE_AD_LIMITED;
970 	} else {
971 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
972 			return LE_AD_LIMITED;
973 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
974 			return LE_AD_GENERAL;
975 	}
976 
977 	return 0;
978 }
979 
mgmt_get_connectable(struct hci_dev * hdev)980 bool mgmt_get_connectable(struct hci_dev *hdev)
981 {
982 	struct mgmt_pending_cmd *cmd;
983 
984 	/* If there's a pending mgmt command the flag will not yet have
985 	 * it's final value, so check for this first.
986 	 */
987 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
988 	if (cmd) {
989 		struct mgmt_mode *cp = cmd->param;
990 
991 		return cp->val;
992 	}
993 
994 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
995 }
996 
/* hci_cmd_sync callback: refresh the EIR data and class of device once
 * the service cache has been turned off. Always returns 0.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
1004 
service_cache_off(struct work_struct * work)1005 static void service_cache_off(struct work_struct *work)
1006 {
1007 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1008 					    service_cache.work);
1009 
1010 	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1011 		return;
1012 
1013 	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1014 }
1015 
rpa_expired_sync(struct hci_dev * hdev,void * data)1016 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1017 {
1018 	/* The generation of a new RPA and programming it into the
1019 	 * controller happens in the hci_req_enable_advertising()
1020 	 * function.
1021 	 */
1022 	if (ext_adv_capable(hdev))
1023 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1024 	else
1025 		return hci_enable_advertising_sync(hdev);
1026 }
1027 
rpa_expired(struct work_struct * work)1028 static void rpa_expired(struct work_struct *work)
1029 {
1030 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1031 					    rpa_expired.work);
1032 
1033 	bt_dev_dbg(hdev, "");
1034 
1035 	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1036 
1037 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1038 		return;
1039 
1040 	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1041 }
1042 
1043 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1044 
/* Delayed work that fires when the discoverable timeout expires and
 * turns discoverable mode back off, notifying mgmt listeners.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	/* Push the new (non-discoverable) state to the controller. */
	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	/* Broadcast the settings change to mgmt sockets. */
	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1069 
1070 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1071 
/* Finish a mesh transmit: emit a Mesh Packet Complete event to
 * userspace (unless @silent) and release the tracking entry.
 *
 * The handle is copied out first because mgmt_mesh_remove() frees
 * @mesh_tx.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1083 
/* hci_cmd_sync callback run when the mesh send window ends: stop
 * advertising and complete the transmit at the head of the queue, if
 * any. Always returns 0.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	/* NULL selects the first queued transmit. */
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1097 
1098 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1099 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion callback for mesh_send_done_sync(): kick off the next
 * queued mesh transmit, if any. The incoming @err is not inspected; the
 * parameter is reused for the result of queueing the next send.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		/* Could not queue it: complete (and free) immediately. */
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1115 
mesh_send_done(struct work_struct * work)1116 static void mesh_send_done(struct work_struct *work)
1117 {
1118 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1119 					    mesh_send_done.work);
1120 
1121 	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1122 		return;
1123 
1124 	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1125 }
1126 
/* One-time switch of @hdev into mgmt-controlled mode, performed when
 * the first mgmt command arrives for it: sets up the delayed work items
 * used by this file and flips the HCI_MGMT flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* Already initialized for mgmt - nothing to do. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1148 
/* Handler for MGMT_OP_READ_INFO: reply with the controller's address,
 * HCI version, manufacturer, settings, class of device and names. The
 * snapshot is taken under hdev->lock for consistency.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1178 
/* Append class of device (BR/EDR only), appearance (LE only) and both
 * local names as EIR fields into @eir. Returns the number of bytes
 * written. The caller must provide a buffer large enough for all four
 * fields.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1202 
/* Handler for MGMT_OP_READ_EXT_INFO: like Read Info, but with class,
 * appearance and names packed as EIR data after the fixed reply header.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* NOTE(review): 512 bytes is assumed to cover the fixed header
	 * plus the worst-case EIR built by append_eir_data_to_buf() -
	 * confirm against the field sizes in mgmt.h.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1242 
/* Broadcast an Extended Info Changed event carrying freshly built EIR
 * data to all sockets that enabled extended info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1258 
/* Send a successful Command Complete for @opcode whose payload is the
 * current settings bitmask.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1266 
/* Emit an Advertising Added event for @instance, skipping the socket
 * that triggered the change.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1275 
/* Emit an Advertising Removed event for @instance, skipping the socket
 * that triggered the change.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1285 
cancel_adv_timeout(struct hci_dev * hdev)1286 static void cancel_adv_timeout(struct hci_dev *hdev)
1287 {
1288 	if (hdev->adv_instance_timeout) {
1289 		hdev->adv_instance_timeout = 0;
1290 		cancel_delayed_work(&hdev->adv_instance_expire);
1291 	}
1292 }
1293 
/* This function requires the caller holds hdev->lock */
/* Re-sort every LE connection parameter entry into the pending-connect
 * or pending-report list according to its auto-connect policy. Used
 * when powering on to restore auto-connection behavior.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1318 
/* Broadcast a New Settings event with the current settings bitmask to
 * all mgmt sockets that enabled setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1326 
/* Completion handler for the Set Powered sync request: on power-on,
 * restore LE auto-connect actions and passive scanning, then report the
 * outcome to the issuing socket.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1362 
/* hci_cmd_sync callback for MGMT_OP_SET_POWERED: apply the requested
 * power state to the controller.
 *
 * NOTE(review): cmd->param is dereferenced here without hdev->lock;
 * assumes the pending command cannot be freed while its sync request
 * runs - confirm against mgmt_pending_remove() call sites.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1372 
/* Handler for MGMT_OP_SET_POWERED: power the controller on or off. On
 * success the result is delivered asynchronously through
 * mgmt_set_powered_complete(); returns a negative errno on failure to
 * queue the request.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x00 (off) and 0x01 (on) are valid. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered command may be in flight at a time. */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state - just echo the settings. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1423 
/* Public wrapper: broadcast a New Settings event to all mgmt sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1428 
/* Accumulator passed through mgmt_pending_foreach() callbacks: collects
 * the first socket seen (with a held reference, see settings_rsp()) and
 * the status to report.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1434 
/* mgmt_pending_foreach callback: answer a pending command with the
 * current settings, unlink and free it. The first command's socket is
 * stashed in the cmd_lookup with a held reference so the caller can
 * skip it when broadcasting New Settings (and must sock_put() it).
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1450 
/* mgmt_pending_foreach callback: fail a pending command with the status
 * pointed to by @data, then remove it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1458 
cmd_complete_rsp(struct mgmt_pending_cmd * cmd,void * data)1459 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1460 {
1461 	if (cmd->cmd_complete) {
1462 		u8 *status = data;
1463 
1464 		cmd->cmd_complete(cmd, *status);
1465 		mgmt_pending_remove(cmd);
1466 
1467 		return;
1468 	}
1469 
1470 	cmd_status_rsp(cmd, data);
1471 }
1472 
/* Complete a pending command by echoing its own parameters back as the
 * reply payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1478 
/* Complete a pending command replying with only the leading
 * mgmt_addr_info portion of its parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1484 
mgmt_bredr_support(struct hci_dev * hdev)1485 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1486 {
1487 	if (!lmp_bredr_capable(hdev))
1488 		return MGMT_STATUS_NOT_SUPPORTED;
1489 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1490 		return MGMT_STATUS_REJECTED;
1491 	else
1492 		return MGMT_STATUS_SUCCESS;
1493 }
1494 
mgmt_le_support(struct hci_dev * hdev)1495 static u8 mgmt_le_support(struct hci_dev *hdev)
1496 {
1497 	if (!lmp_le_capable(hdev))
1498 		return MGMT_STATUS_NOT_SUPPORTED;
1499 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1500 		return MGMT_STATUS_REJECTED;
1501 	else
1502 		return MGMT_STATUS_SUCCESS;
1503 }
1504 
/* Completion handler for the Set Discoverable sync request: arm the
 * discoverable timeout if one was requested and report the outcome to
 * the issuing socket.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Undo the speculative flag change done in
		 * set_discoverable() before the request was queued.
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the timeout only now that the mode change succeeded. */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1538 
/* hci_cmd_sync callback: push the discoverable state previously
 * recorded in the hdev flags down to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1545 
/* Handler for MGMT_OP_SET_DISCOVERABLE: val 0x00 disables, 0x01 enables
 * general and 0x02 enables limited discoverable mode, with an optional
 * timeout. The controller update happens asynchronously through
 * set_discoverable_sync()/mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires at least one of LE or BR/EDR to be enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a powered controller to ever fire. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable depends on connectable being enabled. */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powered off: only flags change, no HCI traffic is needed. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1678 
mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1679 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1680 					  int err)
1681 {
1682 	struct mgmt_pending_cmd *cmd = data;
1683 
1684 	bt_dev_dbg(hdev, "err %d", err);
1685 
1686 	/* Make sure cmd still outstanding. */
1687 	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1688 		return;
1689 
1690 	hci_dev_lock(hdev);
1691 
1692 	if (err) {
1693 		u8 mgmt_err = mgmt_status(err);
1694 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1695 		goto done;
1696 	}
1697 
1698 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1699 	new_settings(hdev, cmd->sk);
1700 
1701 done:
1702 	if (cmd)
1703 		mgmt_pending_remove(cmd);
1704 
1705 	hci_dev_unlock(hdev);
1706 }
1707 
/* Apply a Set Connectable request while the controller is powered off:
 * only flags are touched, no HCI traffic is generated. Disabling
 * connectable also clears discoverable, since the latter depends on the
 * former. Returns 0 or a negative errno.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1736 
/* hci_cmd_sync callback: push the connectable state previously recorded
 * in the hdev flags down to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1743 
/* Handler for MGMT_OP_SET_CONNECTABLE: make the controller accept (or
 * stop accepting) incoming connections. The controller update happens
 * asynchronously through mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires at least one of LE or BR/EDR to be enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the flags change, no HCI traffic needed. */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off implies leaving discoverable
		 * mode too, so stop any running timeout and clear both
		 * discoverable flags as well.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1803 
/* Handler for MGMT_OP_SET_BONDABLE: toggle whether pairing (bonding) is
 * accepted. The setting lives purely in the HCI_BONDABLE flag, so it
 * works regardless of the power state.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1841 
/* Handler for MGMT_OP_SET_LINK_SECURITY: enable or disable BR/EDR link
 * level security (authentication). When powered, a raw
 * HCI_OP_WRITE_AUTH_ENABLE command is sent and the reply is matched
 * against the pending command added here by the HCI event machinery.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the flag changes; reply and notify. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller is already in the requested auth state. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1910 
/* Completion handler for the Set SSP sync request: reconcile the
 * HCI_SSP_ENABLED flag with the outcome, answer all pending Set SSP
 * commands and broadcast New Settings when the mode actually changed.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back the flag set while enabling and notify if
		 * that constitutes an observable change.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	/* settings_rsp stashes the first socket (with a reference) in
	 * match.sk so it can be skipped below and released afterwards.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1952 
/* hci_cmd_sync callback for MGMT_OP_SET_SSP: write the requested SSP
 * mode to the controller.
 *
 * When enabling, HCI_SSP_ENABLED is set up-front so the write sees the
 * new state, and on success the flag is cleared again - presumably so
 * that set_ssp_complete() can itself observe the transition via its
 * test_and_set and emit New Settings. NOTE(review): confirm this
 * clear-on-success (!err) condition is intentional and not inverted.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1970 
/* Handler for MGMT_OP_SET_SSP: enable or disable Secure Simple Pairing.
 * The result is delivered asynchronously through set_ssp_complete().
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the flag changes; reply and notify. */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* No change requested - just echo the current settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2045 
/* MGMT_OP_SET_HS handler: High Speed is not supported, so any request
 * is unconditionally rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	bt_dev_dbg(hdev, "sock %p", sk);

	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
			       MGMT_STATUS_NOT_SUPPORTED);
}
2053 
set_le_complete(struct hci_dev * hdev,void * data,int err)2054 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2055 {
2056 	struct cmd_lookup match = { NULL, hdev };
2057 	u8 status = mgmt_status(err);
2058 
2059 	bt_dev_dbg(hdev, "err %d", err);
2060 
2061 	if (status) {
2062 		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2063 							&status);
2064 		return;
2065 	}
2066 
2067 	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2068 
2069 	new_settings(hdev, match.sk);
2070 
2071 	if (match.sk)
2072 		sock_put(match.sk);
2073 }
2074 
/* hci_cmd_sync callback for MGMT_OP_SET_LE: enables or disables LE host
 * support on the controller and refreshes the advertising state.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Disabling LE: tear down all advertising first */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			/* Instance 0x00 is the default instance */
			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2118 
set_mesh_complete(struct hci_dev * hdev,void * data,int err)2119 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2120 {
2121 	struct mgmt_pending_cmd *cmd = data;
2122 	u8 status = mgmt_status(err);
2123 	struct sock *sk = cmd->sk;
2124 
2125 	if (status) {
2126 		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2127 				     cmd_status_rsp, &status);
2128 		return;
2129 	}
2130 
2131 	mgmt_pending_remove(cmd);
2132 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2133 }
2134 
set_mesh_sync(struct hci_dev * hdev,void * data)2135 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2136 {
2137 	struct mgmt_pending_cmd *cmd = data;
2138 	struct mgmt_cp_set_mesh *cp = cmd->param;
2139 	size_t len = cmd->param_len;
2140 
2141 	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2142 
2143 	if (cp->enable)
2144 		hci_dev_set_flag(hdev, HCI_MESH);
2145 	else
2146 		hci_dev_clear_flag(hdev, HCI_MESH);
2147 
2148 	len -= sizeof(*cp);
2149 
2150 	/* If filters don't fit, forward all adv pkts */
2151 	if (len <= sizeof(hdev->mesh_ad_types))
2152 		memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2153 
2154 	hci_update_passive_scan_sync(hdev);
2155 	return 0;
2156 }
2157 
/* MGMT_OP_SET_MESH_RECEIVER handler: enable/disable mesh receiver mode
 * and install the advertising-type filter list. Only available when the
 * mesh experimental feature is enabled on an LE capable controller.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only 0x00 (off) and 0x01 (on) are valid values */
	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2195 
mesh_send_start_complete(struct hci_dev * hdev,void * data,int err)2196 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2197 {
2198 	struct mgmt_mesh_tx *mesh_tx = data;
2199 	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2200 	unsigned long mesh_send_interval;
2201 	u8 mgmt_err = mgmt_status(err);
2202 
2203 	/* Report any errors here, but don't report completion */
2204 
2205 	if (mgmt_err) {
2206 		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2207 		/* Send Complete Error Code for handle */
2208 		mesh_send_complete(hdev, mesh_tx, false);
2209 		return;
2210 	}
2211 
2212 	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2213 	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2214 			   mesh_send_interval);
2215 }
2216 
/* hci_cmd_sync callback for MGMT_OP_MESH_SEND: transmit a mesh packet by
 * creating a short-lived advertising instance carrying the payload.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	/* Mesh uses a dedicated instance one past the regular adv sets */
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* NOTE(review): this path returns a positive MGMT status rather
	 * than a negative errno like the other error paths — confirm the
	 * completion handler maps it as intended.
	 */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2270 
send_count(struct mgmt_mesh_tx * mesh_tx,void * data)2271 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2272 {
2273 	struct mgmt_rp_mesh_read_features *rp = data;
2274 
2275 	if (rp->used_handles >= rp->max_handles)
2276 		return;
2277 
2278 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2279 }
2280 
/* MGMT_OP_MESH_READ_FEATURES handler: report how many mesh TX handles
 * are available and which ones the requesting socket has in flight.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* Handles are only offered while LE is enabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	/* Collect the handles owned by this socket into rp.handles[] */
	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply to the used portion of the handles array:
	 * sizeof(rp) includes room for MESH_HANDLES_MAX handle bytes, of
	 * which only rp.used_handles were filled in.
	 */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2307 
send_cancel(struct hci_dev * hdev,void * data)2308 static int send_cancel(struct hci_dev *hdev, void *data)
2309 {
2310 	struct mgmt_pending_cmd *cmd = data;
2311 	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2312 	struct mgmt_mesh_tx *mesh_tx;
2313 
2314 	if (!cancel->handle) {
2315 		do {
2316 			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2317 
2318 			if (mesh_tx)
2319 				mesh_send_complete(hdev, mesh_tx, false);
2320 		} while (mesh_tx);
2321 	} else {
2322 		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2323 
2324 		if (mesh_tx && mesh_tx->sk == cmd->sk)
2325 			mesh_send_complete(hdev, mesh_tx, false);
2326 	}
2327 
2328 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2329 			  0, NULL, 0);
2330 	mgmt_pending_free(cmd);
2331 
2332 	return 0;
2333 }
2334 
/* MGMT_OP_MESH_SEND_CANCEL handler: queue cancellation of one (or all)
 * outstanding mesh transmissions owned by the requesting socket.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	/* send_cancel() completes and frees this command itself */
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2368 
/* MGMT_OP_MESH_SEND handler: validate and queue a mesh packet for
 * transmission via advertising. Replies immediately with the assigned
 * TX handle; completion is reported separately.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* LE must be on and the payload must be 1-31 bytes of adv data */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Count this socket's outstanding handles to enforce the limit */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	/* If a transmission is already in progress only queue this one;
	 * presumably it is picked up once the current one finishes —
	 * see mesh_send_start_complete()/mesh_send_done.
	 */
	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		/* Reply with the 1-byte TX handle assigned to this packet */
		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2429 
/* MGMT_OP_SET_LE handler: enable or disable Low Energy support. */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only 0x00 (off) and 0x01 (on) are valid values */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* If powered off, or the host LE state already matches, just
	 * update the flags and report; no HCI traffic is needed.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE also clears the advertising setting */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against other pending commands that change LE state */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2518 
2519 /* This is a helper function to test for pending mgmt commands that can
2520  * cause CoD or EIR HCI commands. We can only allow one such pending
2521  * mgmt command at a time since otherwise we cannot easily track what
2522  * the current values are, will be, and based on that calculate if a new
2523  * HCI command needs to be sent and if yes with what value.
2524  */
pending_eir_or_class(struct hci_dev * hdev)2525 static bool pending_eir_or_class(struct hci_dev *hdev)
2526 {
2527 	struct mgmt_pending_cmd *cmd;
2528 
2529 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2530 		switch (cmd->opcode) {
2531 		case MGMT_OP_ADD_UUID:
2532 		case MGMT_OP_REMOVE_UUID:
2533 		case MGMT_OP_SET_DEV_CLASS:
2534 		case MGMT_OP_SET_POWERED:
2535 			return true;
2536 		}
2537 	}
2538 
2539 	return false;
2540 }
2541 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; get_uuid_size() compares its first 12 bytes
 * against candidate UUIDs to detect shortened 16/32-bit UUIDs.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2546 
get_uuid_size(const u8 * uuid)2547 static u8 get_uuid_size(const u8 *uuid)
2548 {
2549 	u32 val;
2550 
2551 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2552 		return 128;
2553 
2554 	val = get_unaligned_le32(&uuid[12]);
2555 	if (val > 0xffff)
2556 		return 32;
2557 
2558 	return 16;
2559 }
2560 
mgmt_class_complete(struct hci_dev * hdev,void * data,int err)2561 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2562 {
2563 	struct mgmt_pending_cmd *cmd = data;
2564 
2565 	bt_dev_dbg(hdev, "err %d", err);
2566 
2567 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2568 			  mgmt_status(err), hdev->dev_class, 3);
2569 
2570 	mgmt_pending_free(cmd);
2571 }
2572 
/* Push the new Class of Device and EIR data to the controller after a
 * UUID has been added.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2583 
/* MGMT_OP_ADD_UUID handler: add a service UUID to the adapter's list
 * and schedule Class of Device and EIR updates to reflect it.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Serialize against other commands that touch CoD/EIR */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2633 
enable_service_cache(struct hci_dev * hdev)2634 static bool enable_service_cache(struct hci_dev *hdev)
2635 {
2636 	if (!hdev_is_powered(hdev))
2637 		return false;
2638 
2639 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2640 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2641 				   CACHE_TIMEOUT);
2642 		return true;
2643 	}
2644 
2645 	return false;
2646 }
2647 
/* Push the new Class of Device and EIR data to the controller after a
 * UUID has been removed.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2658 
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID (or all of them
 * when the all-zero UUID is given) and schedule CoD/EIR updates.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* Wildcard value: the all-zero UUID clears the whole list */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Serialize against other commands that touch CoD/EIR */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer could be armed, leave the
		 * actual CoD/EIR update to it and reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2729 
set_class_sync(struct hci_dev * hdev,void * data)2730 static int set_class_sync(struct hci_dev *hdev, void *data)
2731 {
2732 	int err = 0;
2733 
2734 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2735 		cancel_delayed_work_sync(&hdev->service_cache);
2736 		err = hci_update_eir_sync(hdev);
2737 	}
2738 
2739 	if (err)
2740 		return err;
2741 
2742 	return hci_update_class_sync(hdev);
2743 }
2744 
/* MGMT_OP_SET_DEV_CLASS handler: set the major/minor Class of Device. */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Serialize against other commands that touch CoD/EIR */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Minor bits 0-1 and major bits 5-7 must be zero */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: just store the values and reply with the current
	 * device class; no HCI traffic is issued here.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2799 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the adapter's stored BR/EDR
 * link keys with the list supplied by userspace.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound keeping the total payload within a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before touching the key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
		if (key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Never load keys that are on the block list */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2889 
/* Broadcast MGMT_EV_DEVICE_UNPAIRED for the given address to all mgmt
 * sockets except skip_sk (the socket whose command triggered it).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2901 
/* Completion handler for unpair_device_sync(): emits DEVICE_UNPAIRED on
 * success, then finishes and frees the pending command.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	/* cmd_complete is addr_cmd_complete, set by unpair_device() */
	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
2913 
unpair_device_sync(struct hci_dev * hdev,void * data)2914 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2915 {
2916 	struct mgmt_pending_cmd *cmd = data;
2917 	struct mgmt_cp_unpair_device *cp = cmd->param;
2918 	struct hci_conn *conn;
2919 
2920 	if (cp->addr.type == BDADDR_BREDR)
2921 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2922 					       &cp->addr.bdaddr);
2923 	else
2924 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2925 					       le_addr_type(cp->addr.type));
2926 
2927 	if (!conn)
2928 		return 0;
2929 
2930 	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2931 }
2932 
/* MGMT_OP_UNPAIR_DEVICE handler: remove pairing data (link keys for
 * BR/EDR, SMP keys for LE) and optionally disconnect the device.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes back the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: remove the stored parameters right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		/* Nothing to disconnect: reply and broadcast right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Disconnect asynchronously; unpair_device_complete() replies */
	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3061 
/* MGMT_OP_DISCONNECT handler: terminate the BR/EDR or LE link to the
 * given peer address. The reply always echoes the requested address.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the reply with the address from the request */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect operation may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	/* BR/EDR connections are keyed by ACL link type; LE lookups
	 * need the resolved LE address type.
	 */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* The pending command is completed later; drop it right away if
	 * issuing the disconnect fails.
	 */
	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3127 
/* Map an HCI link type plus address type to the mgmt BDADDR_* value. */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	if (link_type == ISO_LINK || link_type == LE_LINK) {
		if (addr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;

		/* Fallback to LE Random address type */
		return BDADDR_LE_RANDOM;
	}

	/* Fallback to BR/EDR type */
	return BDADDR_BREDR;
}
3147 
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * connections that mgmt considers connected.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count eligible connections to size the reply */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses. SCO/eSCO links are
	 * skipped after the copy (i is not advanced), so the slot is
	 * simply reused by the next eligible connection.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3201 
send_pin_code_neg_reply(struct sock * sk,struct hci_dev * hdev,struct mgmt_cp_pin_code_neg_reply * cp)3202 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3203 				   struct mgmt_cp_pin_code_neg_reply *cp)
3204 {
3205 	struct mgmt_pending_cmd *cmd;
3206 	int err;
3207 
3208 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3209 			       sizeof(*cp));
3210 	if (!cmd)
3211 		return -ENOMEM;
3212 
3213 	cmd->cmd_complete = addr_cmd_complete;
3214 
3215 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3216 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3217 	if (err < 0)
3218 		mgmt_pending_remove(cmd);
3219 
3220 	return err;
3221 }
3222 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code
 * for an existing BR/EDR connection to the controller.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN codes only apply to BR/EDR (ACL) connections */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN: send a negative
	 * reply to the controller and report invalid parameters back.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	/* Drop the pending entry again if the HCI command can't be sent */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3284 
/* MGMT_OP_SET_IO_CAPABILITY handler: record the IO capability that
 * will be used for subsequent pairing operations.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject values beyond the highest defined SMP IO capability */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = cp->io_capability;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
3307 
find_pairing(struct hci_conn * conn)3308 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3309 {
3310 	struct hci_dev *hdev = conn->hdev;
3311 	struct mgmt_pending_cmd *cmd;
3312 
3313 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3314 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3315 			continue;
3316 
3317 		if (cmd->user_data != conn)
3318 			continue;
3319 
3320 		return cmd;
3321 	}
3322 
3323 	return NULL;
3324 }
3325 
/* Complete a PAIR_DEVICE command with @status and detach the mgmt
 * layer from the connection's confirmation callbacks.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Balances the hci_conn_get() taken when cmd->user_data was set */
	hci_conn_put(conn);

	return err;
}
3354 
/* Finish any pending PAIR_DEVICE command once SMP pairing ends. */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
			  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
3366 
/* Connection callback (BR/EDR pairing): finish the pending
 * PAIR_DEVICE command with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	BT_DBG("status %u", status);

	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3382 
/* LE connection callback: only failures end the pairing here; a
 * zero status is ignored because for LE a successful connection
 * alone does not prove that pairing finished (see pair_device()).
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3401 
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated-bonding pairing
 * with a remote BR/EDR or LE device. The command stays pending and
 * is finished via pairing_complete() from the connection callbacks.
 *
 * Fix: hci_conn_params_add() returns NULL on allocation failure, so
 * check the result before dereferencing it instead of crashing.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the reply with the address from the request */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			/* Allocation of the parameter entry failed; bail
			 * out instead of dereferencing a NULL pointer.
			 */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_PAIR_DEVICE,
						MGMT_STATUS_NO_RESOURCES,
						&rp, sizeof(rp));
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A connect callback already installed means another operation
	 * owns this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference is released again in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure, complete right away */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3532 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the currently pending
 * PAIR_DEVICE command for the given address, clean up any partially
 * established pairing state and tear down the link if it was created
 * only for the pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* user_data was set to the connection by pair_device() */
	conn = cmd->user_data;

	/* The cancel must target the same device as the pending pairing */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3589 
/* Shared handler for the user confirmation/passkey (and PIN negative)
 * reply commands: LE responses are routed through SMP, while BR/EDR
 * responses are sent to the controller as the HCI opcode @hci_op.
 * @passkey is only used for HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go through SMP and complete immediately */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	/* Drop the pending entry again if the HCI command can't be sent */
	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3660 
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: reject a PIN code request. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;
	struct mgmt_addr_info *addr = &cp->addr;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Delegate to the common user pairing response helper */
	return user_pairing_resp(sk, hdev, addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3672 
/* MGMT_OP_USER_CONFIRM_REPLY handler: accept a user confirmation
 * (numeric comparison) request.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;
	struct mgmt_addr_info *addr = &cp->addr;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* This command has no variable-length payload */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3688 
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: reject a user confirmation
 * (numeric comparison) request.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;
	struct mgmt_addr_info *addr = &cp->addr;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3700 
/* MGMT_OP_USER_PASSKEY_REPLY handler: supply the passkey entered by
 * the user for an ongoing pairing.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;
	struct mgmt_addr_info *addr = &cp->addr;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3712 
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: reject a passkey request. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;
	struct mgmt_addr_info *addr = &cp->addr;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3724 
/* If the current advertising instance uses any of the given @flags,
 * cancel its timeout and schedule the next instance so the changed
 * data (e.g. local name or appearance) is picked up. Returns 0 in
 * all cases.
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}
3747 
/* hci_cmd_sync callback: expire the current advertising instance if
 * it carries the (now changed) local name.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3752 
/* Completion handler for set_name_sync(): report the result of the
 * SET_LOCAL_NAME command back to the caller and, on success while
 * advertising, refresh advertising data that contains the name.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out unless this is still the pending command */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3777 
/* hci_cmd_sync callback: push the new local name to the controller
 * (name + EIR for BR/EDR, scan response data for LE advertising).
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3793 
/* MGMT_OP_SET_LOCAL_NAME handler: update the device name and short
 * name. When powered, the name is written to the controller via
 * set_name_sync(); when unpowered, it is only cached and the change
 * is announced right away.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is cached unconditionally; dev_name is only
	 * updated below once the controller update path is settled.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3856 
/* hci_cmd_sync callback: expire the current advertising instance if
 * it carries the (now changed) appearance value.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3861 
/* MGMT_OP_SET_APPEARANCE handler: store the LE appearance value and,
 * if it changed while advertising, queue a refresh of advertising
 * data that contains it.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Appearance is an LE-only concept */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3896 
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report the supported,
 * selected and configurable PHYs of the controller.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* memset also clears any padding sent out in the reply */
	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3917 
/* Broadcast a PHY Configuration Changed event to all mgmt sockets
 * except @skip (typically the one that triggered the change).
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3929 
/* Completion handler for set_default_phy_sync(): derive the final
 * status from both the sync error and the HCI response skb, reply to
 * the caller and broadcast the change on success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out unless this is still the pending command */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* No sync error: inspect the response skb for the HCI status */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3966 
set_default_phy_sync(struct hci_dev * hdev,void * data)3967 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3968 {
3969 	struct mgmt_pending_cmd *cmd = data;
3970 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3971 	struct hci_cp_le_set_default_phy cp_phy;
3972 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3973 
3974 	memset(&cp_phy, 0, sizeof(cp_phy));
3975 
3976 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3977 		cp_phy.all_phys |= 0x01;
3978 
3979 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3980 		cp_phy.all_phys |= 0x02;
3981 
3982 	if (selected_phys & MGMT_PHY_LE_1M_TX)
3983 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3984 
3985 	if (selected_phys & MGMT_PHY_LE_2M_TX)
3986 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3987 
3988 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
3989 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3990 
3991 	if (selected_phys & MGMT_PHY_LE_1M_RX)
3992 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3993 
3994 	if (selected_phys & MGMT_PHY_LE_2M_RX)
3995 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3996 
3997 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
3998 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3999 
4000 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4001 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4002 
4003 	return 0;
4004 }
4005 
/* Handler for MGMT_OP_SET_PHY_CONFIGURATION: select which BR/EDR packet
 * types and LE PHYs the controller may use.
 *
 * BR/EDR selections are applied immediately by adjusting hdev->pkt_type;
 * a change of the LE PHY selection is queued as an HCI LE Set Default PHY
 * command via set_default_phy_sync().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);	/* 1-slot basic rate always on */
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any PHY the controller does not support at all */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Supported-but-not-configurable PHYs must always stay selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do when the selection matches the current state */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Only one Set PHY Configuration command may be in flight */
	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* BR 1M multi-slot bits directly enable DH/DM packet types */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	/* The EDR bits in pkt_type are "shall not be used" flags, so a
	 * bit is cleared to enable the packet type and set to disable it
	 * (inverse of the 1M bits above).
	 */
	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* When only BR/EDR bits changed there is no HCI command to issue;
	 * emit the changed event (if anything changed) and complete now.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4134 
/* Handler for MGMT_OP_SET_BLOCKED_KEYS: replace the list of blocked
 * (revoked) keys with the list supplied by userspace.
 *
 * Note that err holds an MGMT status code here, not a negative errno,
 * so it can be passed straight through to mgmt_cmd_complete().
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Largest key_count that keeps the total command within a u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Payload must be exactly header plus key_count entries */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The old list is dropped wholesale; the new list replaces it */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			/* NOTE(review): entries added before the failed
			 * allocation stay on the list even though
			 * NO_RESOURCES is returned - confirm this
			 * partial-apply behaviour is intended.
			 */
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
4183 
/* Handler for MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband speech
 * setting.  Only available when the driver declares support via the
 * HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED quirk, and the value can only be
 * changed while the controller is powered off.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean on/off values are accepted */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Changing the setting on a powered controller is rejected */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* test-and-set/clear so changed reflects an actual transition */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Broadcast New Settings only when the flag actually changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4232 
/* Handler for MGMT_OP_READ_CONTROLLER_CAP: report security capabilities
 * of the controller as a sequence of EIR-style (len, type, value)
 * entries appended to the response.
 *
 * buf must be large enough for the response header plus all entries
 * that can be appended below.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		/* One byte each for min and max LE TX power */
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4299 
/* Experimental feature UUIDs.  Each is stored in little-endian (reversed)
 * byte order, matching how the UUID is carried in MGMT messages; the
 * human-readable form is given in the comment above each table.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4343 
/* Handler for MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental
 * features available either globally (hdev == NULL, non-controller
 * index) or for a specific controller, with a flags word per feature
 * where BIT(0) indicates the feature is currently enabled.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature is a global toggle, only listed for the
	 * non-controller index.
	 */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* BIT(1) marks that toggling this feature also changes
		 * the supported settings (see set_rpa_resolution_func).
		 */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* Each entry is 20 bytes: 16-byte UUID plus 32-bit flags */
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}
4448 
/* Emit an Experimental Feature Changed event for the LL privacy (RPA
 * resolution) feature and mirror the new state into hdev->conn_flags.
 * BIT(1) signals that the supported settings changed as well.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = BIT(1);

	if (enabled)
		flags |= BIT(0);

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	/* Do we need to be atomic with the conn_flags? */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
4469 
/* Emit an Experimental Feature Changed event for the given feature UUID
 * to all sockets that enabled experimental feature events, except skip.
 */
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
	memcpy(ev.uuid, uuid, 16);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
4483 
/* Build one entry of the exp_features[] dispatch table: maps a feature
 * UUID to its Set Experimental Feature handler.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4489 
/* The zero key uuid is special. Multiple exp features are set through it. */
/* Handler for the all-zero UUID: disables the toggled experimental
 * features (debug on the non-controller index, LL privacy on a powered
 * down controller), emitting change events for any that flipped.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* LL privacy can only be cleared while powered down */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4526 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Set Experimental Feature handler for the debug UUID: toggles the
 * global Bluetooth debug logging state.  Global feature, so it must be
 * addressed through the non-controller index.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here (checked above), so the event goes out on
	 * the non-controller index.
	 */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4573 
/* Set Experimental Feature handler for the MGMT mesh UUID: toggles the
 * HCI_MESH_EXPERIMENTAL flag on a controller; disabling it also clears
 * the active HCI_MESH flag.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		/* Turning the experimental flag off also stops mesh */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}
4624 
/* Set Experimental Feature handler for the RPA resolution (LL privacy)
 * UUID.  Only allowed while the controller is powered off; toggling it
 * also changes the supported settings, signalled via BIT(1) in flags.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		/* LL privacy is incompatible with advertising being on;
		 * drop the advertising flag when enabling.
		 */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4689 
/* Set Experimental Feature handler for the quality report UUID: toggles
 * controller quality reporting via either the driver's
 * set_quality_report callback or the AOSP vendor extension.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* NOTE(review): uses the request-sync lock rather than
	 * hci_dev_lock - presumably because toggling the report may
	 * involve issuing HCI traffic; confirm against the callbacks.
	 */
	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Driver callback takes precedence over AOSP extension */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Flag only updated after the backend call succeeded */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4763 
/* Set Experimental Feature handler for the offload codecs UUID: toggles
 * the HCI_OFFLOAD_CODECS_ENABLED flag.  Requires the driver to provide
 * a get_data_path_id callback.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	/* Codec offload requires driver support for data path queries */
	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4821 
/* Set Experimental Feature handler for the LE simultaneous roles UUID:
 * toggles the HCI_LE_SIMULTANEOUS_ROLES flag.  Requires the controller
 * to support simultaneous central/peripheral LE states.
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4879 
#ifdef CONFIG_BT_LE
/* Set Experimental Feature handler for the ISO socket UUID: registers
 * or unregisters ISO socket support globally, so it must be addressed
 * through the non-controller index.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = cp->param[0] ? true : false;
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	/* Only report a change when (de)registration succeeded */
	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here (checked above), so the event goes out on
	 * the non-controller index.
	 */
	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif
4930 
/* Table mapping experimental feature UUIDs to their Set Experimental
 * Feature handlers.  Searched linearly by set_exp_feature() and
 * terminated by a NULL entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4952 
/* Handler for MGMT_OP_SET_EXP_FEATURE: dispatch on the feature UUID in
 * the command to the matching handler in exp_features[], or return
 * Not Supported when the UUID is unknown.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	const struct mgmt_exp_feature *feature;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (feature = exp_features; feature->uuid; feature++) {
		if (!memcmp(cp->uuid, feature->uuid, 16))
			return feature->set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4970 
get_params_flags(struct hci_dev * hdev,struct hci_conn_params * params)4971 static u32 get_params_flags(struct hci_dev *hdev,
4972 			    struct hci_conn_params *params)
4973 {
4974 	u32 flags = hdev->conn_flags;
4975 
4976 	/* Devices using RPAs can only be programmed in the acceptlist if
4977 	 * LL Privacy has been enable otherwise they cannot mark
4978 	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
4979 	 */
4980 	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
4981 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
4982 		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
4983 
4984 	return flags;
4985 }
4986 
/* Handler for MGMT_OP_GET_DEVICE_FLAGS: report the supported and current
 * per-device flags for a device on the BR/EDR accept list or with stored
 * LE connection parameters.  Responds with INVALID_PARAMS status when
 * the device is not known.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	/* No trailing newline: bt_dev_dbg() terminates the line itself,
	 * matching every other bt_dev_dbg() call in this file.
	 */
	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		/* LE supported flags may be restricted per device */
		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
5038 
/* Emit MGMT_EV_DEVICE_FLAGS_CHANGED carrying the device address and the
 * new supported/current flag words; @sk is passed through to mgmt_event().
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev = {
		.addr.type = bdaddr_type,
		.supported_flags = cpu_to_le32(supported_flags),
		.current_flags = cpu_to_le32(current_flags),
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
5052 
/* MGMT_OP_SET_DEVICE_FLAGS handler: replace the per-device flag word of a
 * BR/EDR accept-list entry or an LE connection-parameter entry.
 *
 * On success a MGMT_EV_DEVICE_FLAGS_CHANGED event is emitted via
 * device_flags_changed(); the reply always echoes the device address.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	// We should take hci_dev_lock() early, I think.. conn_flags can change
	supported_flags = hdev->conn_flags;

	/* Reject any requested flag outside the controller-wide set. */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* Re-validate against the per-device supported set, which may be
	 * narrower than hdev->conn_flags (see get_params_flags()).
	 */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	/* NOTE(review): WRITE_ONCE suggests params->flags is also read
	 * without the lock somewhere — confirm against the readers.
	 */
	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5129 
/* Broadcast MGMT_EV_ADV_MONITOR_ADDED for a newly registered monitor;
 * @sk is passed through to mgmt_event().
 */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
5139 
/* Broadcast MGMT_EV_ADV_MONITOR_REMOVED for @handle. When a pending
 * Remove Adv Monitor command targeted a specific handle (non-zero),
 * its socket is passed to mgmt_event() as the one to skip — it gets
 * the command reply instead.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev = {
		.monitor_handle = cpu_to_le16(handle),
	};
	struct sock *sk_skip = NULL;
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

		/* Handle 0 means "remove all"; only skip for single removals. */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5159 
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported/enabled
 * monitor features, the handle limits, and every currently registered
 * monitor handle.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
		/* Defensive bound: never write past the fixed-size stack
		 * array even if the IDR somehow holds more entries than
		 * HCI_MAX_ADV_MONITOR_NUM_HANDLES.
		 */
		if (num_handles >= HCI_MAX_ADV_MONITOR_NUM_HANDLES)
			break;
		handles[num_handles++] = monitor->handle;
	}

	hci_dev_unlock(hdev);

	/* Reply is the fixed header plus one u16 handle per monitor. */
	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5208 
/* hci_cmd_sync completion for both Add Adv Patterns Monitor variants.
 *
 * Replies to the pending management command with the monitor handle and,
 * on success, announces the monitor, bumps the count and refreshes
 * passive scanning so the new monitor takes effect.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		/* Announce to other mgmt sockets (the requester gets the
		 * command reply below instead).
		 */
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5236 
mgmt_add_adv_patterns_monitor_sync(struct hci_dev * hdev,void * data)5237 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5238 {
5239 	struct mgmt_pending_cmd *cmd = data;
5240 	struct adv_monitor *monitor = cmd->user_data;
5241 
5242 	return hci_add_adv_monitor(hdev, monitor);
5243 }
5244 
/* Common tail for the Add Adv Patterns Monitor command variants.
 *
 * Takes ownership of @m: on every failure path (including a non-zero
 * @status handed in from parsing) the monitor is freed here and a command
 * status reply is sent; on success the reply comes later from
 * mgmt_add_adv_patterns_monitor_complete().
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	/* Caller already failed while building the monitor. */
	if (status)
		goto unlock;

	/* Only one monitor or LE-state changing operation at a time. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		/* Drop the pending entry, matching remove_adv_monitor();
		 * leaving it queued would keep all later monitor commands
		 * permanently BUSY and leak the command.
		 */
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5292 
parse_adv_monitor_rssi(struct adv_monitor * m,struct mgmt_adv_rssi_thresholds * rssi)5293 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5294 				   struct mgmt_adv_rssi_thresholds *rssi)
5295 {
5296 	if (rssi) {
5297 		m->rssi.low_threshold = rssi->low_threshold;
5298 		m->rssi.low_threshold_timeout =
5299 		    __le16_to_cpu(rssi->low_threshold_timeout);
5300 		m->rssi.high_threshold = rssi->high_threshold;
5301 		m->rssi.high_threshold_timeout =
5302 		    __le16_to_cpu(rssi->high_threshold_timeout);
5303 		m->rssi.sampling_period = rssi->sampling_period;
5304 	} else {
5305 		/* Default values. These numbers are the least constricting
5306 		 * parameters for MSFT API to work, so it behaves as if there
5307 		 * are no rssi parameter to consider. May need to be changed
5308 		 * if other API are to be supported.
5309 		 */
5310 		m->rssi.low_threshold = -127;
5311 		m->rssi.low_threshold_timeout = 60;
5312 		m->rssi.high_threshold = -127;
5313 		m->rssi.high_threshold_timeout = 0;
5314 		m->rssi.sampling_period = 0;
5315 	}
5316 }
5317 
/* Validate @pattern_count patterns from the request and append copies to
 * m->patterns. On failure, already-appended patterns stay on the list;
 * the caller releases them via hci_free_adv_monitor().
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	int idx;

	for (idx = 0; idx < pattern_count; idx++) {
		struct mgmt_adv_pattern *src = &patterns[idx];
		struct adv_pattern *p;

		/* Pattern must fit entirely inside the maximum extended
		 * advertising data length.
		 */
		if (src->offset >= HCI_MAX_EXT_AD_LENGTH ||
		    src->length > HCI_MAX_EXT_AD_LENGTH ||
		    (src->offset + src->length) > HCI_MAX_EXT_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = src->ad_type;
		p->offset = src->offset;
		p->length = src->length;
		memcpy(p->value, src->value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
5348 
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler: build a monitor with default
 * RSSI thresholds from the request and hand it to
 * __add_adv_patterns_monitor(), which owns it from then on.
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t want;

	BT_DBG("request for %s", hdev->name);

	/* Need more than the fixed header before pattern_count is valid. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Total length must match the advertised pattern count exactly. */
	want = sizeof(*cp) + cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != want) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* No thresholds in this command variant: install the defaults. */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5385 
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: like
 * add_adv_patterns_monitor() but takes RSSI thresholds from the request.
 * The monitor is owned by __add_adv_patterns_monitor() from then on.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t want;

	BT_DBG("request for %s", hdev->name);

	/* Need more than the fixed header before pattern_count is valid. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Total length must match the advertised pattern count exactly. */
	want = sizeof(*cp) + cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != want) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5422 
/* hci_cmd_sync completion for MGMT_OP_REMOVE_ADV_MONITOR: reply to the
 * pending command echoing the requested handle and, on success, refresh
 * passive scanning to reflect the reduced monitor set.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Copied as-is: the request handle is already little-endian. */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5445 
mgmt_remove_adv_monitor_sync(struct hci_dev * hdev,void * data)5446 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5447 {
5448 	struct mgmt_pending_cmd *cmd = data;
5449 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5450 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5451 
5452 	if (!handle)
5453 		return hci_remove_all_adv_monitor(hdev);
5454 
5455 	return hci_remove_single_adv_monitor(hdev, handle);
5456 }
5457 
/* MGMT_OP_REMOVE_ADV_MONITOR handler: queue removal of one monitor (or
 * all of them, when the handle in the request is 0) on the cmd_sync
 * context; the reply is sent from mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Only one monitor or LE-state changing operation at a time. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		/* Undo the pending entry so later commands aren't BUSY. */
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5503 
/* hci_cmd_sync completion for MGMT_OP_READ_LOCAL_OOB_DATA.
 *
 * Translates the controller's HCI response (legacy or extended variant,
 * chosen by bredr_sc_enabled()) into the management reply. When Secure
 * Connections is off, the P-256 hash/rand fields are cut from the reply.
 * Consumes cmd->skb and frees the pending command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			/* First byte of the HCI response is its status. */
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a short/truncated controller response. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Truncate the reply: no P-256 data without SC. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a short/truncated controller response. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5570 
read_local_oob_data_sync(struct hci_dev * hdev,void * data)5571 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5572 {
5573 	struct mgmt_pending_cmd *cmd = data;
5574 
5575 	if (bredr_sc_enabled(hdev))
5576 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5577 	else
5578 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5579 
5580 	if (IS_ERR(cmd->skb))
5581 		return PTR_ERR(cmd->skb);
5582 	else
5583 		return 0;
5584 }
5585 
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller. Queues the HCI read on the cmd_sync context; the reply is
 * sent from read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* NOTE: mgmt_pending_new() is used here (not mgmt_pending_add() as
	 * elsewhere); the completion pairs it with mgmt_pending_free().
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5627 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.
 *
 * Accepts two request sizes: the short form with P-192 hash/rand only
 * (BR/EDR addresses only) and the extended form which additionally
 * carries P-256 values. Zeroed key pairs disable the corresponding OOB
 * data; for LE addresses the P-192 pair must be all-zero.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The short (P-192 only) form is BR/EDR-specific. */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5735 
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drop stored OOB data for one
 * BR/EDR device, or for all devices when the wildcard address BDADDR_ANY
 * is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Wildcard address clears every stored entry. */
		hci_remote_oob_data_clear(hdev);
	} else if (hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type) < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
5772 
/* Complete whichever Start Discovery variant is pending (regular,
 * service or limited), searched in that order, with the given HCI status.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	static const u16 ops[] = {
		MGMT_OP_START_DISCOVERY,
		MGMT_OP_START_SERVICE_DISCOVERY,
		MGMT_OP_START_LIMITED_DISCOVERY,
	};
	struct mgmt_pending_cmd *cmd = NULL;
	size_t i;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	for (i = 0; i < ARRAY_SIZE(ops) && !cmd; i++)
		cmd = pending_find(ops[i], hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
5795 
discovery_type_is_valid(struct hci_dev * hdev,uint8_t type,uint8_t * mgmt_status)5796 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5797 				    uint8_t *mgmt_status)
5798 {
5799 	switch (type) {
5800 	case DISCOV_TYPE_LE:
5801 		*mgmt_status = mgmt_le_support(hdev);
5802 		if (*mgmt_status)
5803 			return false;
5804 		break;
5805 	case DISCOV_TYPE_INTERLEAVED:
5806 		*mgmt_status = mgmt_le_support(hdev);
5807 		if (*mgmt_status)
5808 			return false;
5809 		fallthrough;
5810 	case DISCOV_TYPE_BREDR:
5811 		*mgmt_status = mgmt_bredr_support(hdev);
5812 		if (*mgmt_status)
5813 			return false;
5814 		break;
5815 	default:
5816 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5817 		return false;
5818 	}
5819 
5820 	return true;
5821 }
5822 
/* hci_cmd_sync completion shared by all Start Discovery variants: reply
 * to the requester and move the discovery state machine forward (or back
 * to STOPPED on error).
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if @cmd is no longer the pending command for any of the
	 * discovery variants (e.g. it was already completed elsewhere).
	 */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply with one byte of cmd->param: the discovery type, which is
	 * the first field of every Start Discovery request.
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5841 
/* cmd_sync work callback: kick off discovery; @data (the pending cmd) is
 * unused here and handled by start_discovery_complete().
 */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5846 
/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which).
 *
 * Validates power/busy/type/pause state, records the discovery settings
 * on @hdev and queues the actual start on the cmd_sync context; the
 * final reply is sent from start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Busy if a discovery is already running or periodic inquiry is on. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	/* Fallthrough on success: err is >= 0 from hci_cmd_sync_queue(). */
failed:
	hci_dev_unlock(hdev);
	return err;
}
5917 
/* MGMT_OP_START_DISCOVERY handler: thin wrapper selecting the regular
 * discovery variant of start_discovery_internal().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5924 
/* MGMT_OP_START_LIMITED_DISCOVERY handler: thin wrapper selecting the
 * limited-discovery variant of start_discovery_internal().
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5932 
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Discovery requires a powered controller. */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery procedure (including periodic inquiry) may
	 * run at a time.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* While discovery is paused (e.g. during suspend) report busy
	 * rather than queueing anything.
	 */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Bound the UUID count before using it for the exact length
	 * check below (each UUID entry is 16 bytes).
	 */
	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6044 
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *pending;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* Complete and drop any outstanding Stop Discovery command. */
	pending = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (pending) {
		pending->cmd_complete(pending, mgmt_status(status));
		mgmt_pending_remove(pending);
	}

	hci_dev_unlock(hdev);
}
6061 
stop_discovery_complete(struct hci_dev * hdev,void * data,int err)6062 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6063 {
6064 	struct mgmt_pending_cmd *cmd = data;
6065 
6066 	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6067 		return;
6068 
6069 	bt_dev_dbg(hdev, "err %d", err);
6070 
6071 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6072 			  cmd->param, 1);
6073 	mgmt_pending_remove(cmd);
6074 
6075 	if (!err)
6076 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6077 }
6078 
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	/* The pending command context is not needed here. */
	return hci_stop_discovery_sync(hdev);
}
6083 
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Nothing to stop if no discovery is running. */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &cp->type,
					sizeof(cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery that was started. */
	if (hdev->discovery.type != cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->type, sizeof(cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6128 
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *entry;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Confirming a name only makes sense while discovery is active. */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	entry = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!entry) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: drop it from the resolve list. */
		entry->name_state = NAME_KNOWN;
		list_del(&entry->list);
	} else {
		/* Queue this entry for name resolution. */
		entry->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, entry);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
6170 
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* Add the address to the reject list and emit the blocked event
	 * only when that succeeded.
	 */
	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_FAILED;
	} else {
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6206 
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* A failed delete means the address was not on the reject list,
	 * which is reported back as invalid parameters.
	 */
	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
	} else {
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6242 
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	/* The Device ID is carried in the EIR data, so just refresh it. */
	return hci_update_eir_sync(hdev);
}
6247 
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	__u16 source;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only source values 0x0000-0x0002 are accepted. */
	source = __le16_to_cpu(cp->source);
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Refresh the EIR data in the background; the command result is
	 * not tied to the outcome of this update.
	 */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
6279 
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	/* Log-only helper reporting the outcome of re-enabling instance
	 * advertising.
	 */
	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
	else
		bt_dev_dbg(hdev, "status %d", err);
}
6287 
set_advertising_complete(struct hci_dev * hdev,void * data,int err)6288 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6289 {
6290 	struct cmd_lookup match = { NULL, hdev };
6291 	u8 instance;
6292 	struct adv_info *adv_instance;
6293 	u8 status = mgmt_status(err);
6294 
6295 	if (status) {
6296 		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6297 				     cmd_status_rsp, &status);
6298 		return;
6299 	}
6300 
6301 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6302 		hci_dev_set_flag(hdev, HCI_ADVERTISING);
6303 	else
6304 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6305 
6306 	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6307 			     &match);
6308 
6309 	new_settings(hdev, match.sk);
6310 
6311 	if (match.sk)
6312 		sock_put(match.sk);
6313 
6314 	/* If "Set Advertising" was just disabled and instance advertising was
6315 	 * set up earlier, then re-enable multi-instance advertising.
6316 	 */
6317 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6318 	    list_empty(&hdev->adv_instances))
6319 		return;
6320 
6321 	instance = hdev->cur_adv_instance;
6322 	if (!instance) {
6323 		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6324 							struct adv_info, list);
6325 		if (!adv_instance)
6326 			return;
6327 
6328 		instance = adv_instance->instance;
6329 	}
6330 
6331 	err = hci_schedule_adv_instance_sync(hdev, instance, true);
6332 
6333 	enable_advertising_instance(hdev, err);
6334 }
6335 
set_adv_sync(struct hci_dev * hdev,void * data)6336 static int set_adv_sync(struct hci_dev *hdev, void *data)
6337 {
6338 	struct mgmt_pending_cmd *cmd = data;
6339 	struct mgmt_mode *cp = cmd->param;
6340 	u8 val = !!cp->val;
6341 
6342 	if (cp->val == 0x02)
6343 		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6344 	else
6345 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6346 
6347 	cancel_adv_timeout(hdev);
6348 
6349 	if (val) {
6350 		/* Switch to instance "0" for the Set Advertising setting.
6351 		 * We cannot use update_[adv|scan_rsp]_data() here as the
6352 		 * HCI_ADVERTISING flag is not yet set.
6353 		 */
6354 		hdev->cur_adv_instance = 0x00;
6355 
6356 		if (ext_adv_capable(hdev)) {
6357 			hci_start_ext_adv_sync(hdev, 0x00);
6358 		} else {
6359 			hci_update_adv_data_sync(hdev, 0x00);
6360 			hci_update_scan_rsp_data_sync(hdev, 0x00);
6361 			hci_enable_advertising_sync(hdev);
6362 		}
6363 	} else {
6364 		hci_disable_advertising_sync(hdev);
6365 	}
6366 
6367 	return 0;
6368 }
6369 
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* 0x00 = off, 0x01 = on, 0x02 = connectable advertising */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising / Set LE operation at a time. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6454 
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* The static address can only be changed while powered off. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY disables the static address; any other value must
	 * be a valid static random address.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6498 
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Both interval and window must be in the 0x0004-0x4000 range
	 * and the window must never exceed the interval.
	 */
	interval = __le16_to_cpu(cp->interval);
	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);
	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6547 
fast_connectable_complete(struct hci_dev * hdev,void * data,int err)6548 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6549 {
6550 	struct mgmt_pending_cmd *cmd = data;
6551 
6552 	bt_dev_dbg(hdev, "err %d", err);
6553 
6554 	if (err) {
6555 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6556 				mgmt_status(err));
6557 	} else {
6558 		struct mgmt_mode *cp = cmd->param;
6559 
6560 		if (cp->val)
6561 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6562 		else
6563 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6564 
6565 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6566 		new_settings(hdev, cmd->sk);
6567 	}
6568 
6569 	mgmt_pending_free(cmd);
6570 }
6571 
write_fast_connectable_sync(struct hci_dev * hdev,void * data)6572 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6573 {
6574 	struct mgmt_pending_cmd *cmd = data;
6575 	struct mgmt_mode *cp = cmd->param;
6576 
6577 	return hci_write_fast_connectable_sync(hdev, cp->val);
6578 }
6579 
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Fast connectable needs BR/EDR and at least Bluetooth 1.2. */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just acknowledge the current setting. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off only the flag is toggled; the controller
	 * gets configured when it is powered on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6635 
set_bredr_complete(struct hci_dev * hdev,void * data,int err)6636 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6637 {
6638 	struct mgmt_pending_cmd *cmd = data;
6639 
6640 	bt_dev_dbg(hdev, "err %d", err);
6641 
6642 	if (err) {
6643 		u8 mgmt_err = mgmt_status(err);
6644 
6645 		/* We need to restore the flag if related HCI commands
6646 		 * failed.
6647 		 */
6648 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6649 
6650 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6651 	} else {
6652 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6653 		new_settings(hdev, cmd->sk);
6654 	}
6655 
6656 	mgmt_pending_free(cmd);
6657 }
6658 
set_bredr_sync(struct hci_dev * hdev,void * data)6659 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6660 {
6661 	int status;
6662 
6663 	status = hci_write_fast_connectable_sync(hdev, false);
6664 
6665 	if (!status)
6666 		status = hci_update_scan_sync(hdev);
6667 
6668 	/* Since only the advertising data flags will change, there
6669 	 * is no need to update the scan response data.
6670 	 */
6671 	if (!status)
6672 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6673 
6674 	return status;
6675 }
6676 
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Toggling BR/EDR only makes sense on a dual-mode controller. */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just acknowledge the current setting. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all settings that only
		 * apply to BR/EDR operation.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6776 
set_secure_conn_complete(struct hci_dev * hdev,void * data,int err)6777 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6778 {
6779 	struct mgmt_pending_cmd *cmd = data;
6780 	struct mgmt_mode *cp;
6781 
6782 	bt_dev_dbg(hdev, "err %d", err);
6783 
6784 	if (err) {
6785 		u8 mgmt_err = mgmt_status(err);
6786 
6787 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6788 		goto done;
6789 	}
6790 
6791 	cp = cmd->param;
6792 
6793 	switch (cp->val) {
6794 	case 0x00:
6795 		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6796 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6797 		break;
6798 	case 0x01:
6799 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6800 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6801 		break;
6802 	case 0x02:
6803 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6804 		hci_dev_set_flag(hdev, HCI_SC_ONLY);
6805 		break;
6806 	}
6807 
6808 	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6809 	new_settings(hdev, cmd->sk);
6810 
6811 done:
6812 	mgmt_pending_free(cmd);
6813 }
6814 
set_secure_conn_sync(struct hci_dev * hdev,void * data)6815 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6816 {
6817 	struct mgmt_pending_cmd *cmd = data;
6818 	struct mgmt_mode *cp = cmd->param;
6819 	u8 val = !!cp->val;
6820 
6821 	/* Force write of val */
6822 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6823 
6824 	return hci_write_sc_support_sync(hdev, val);
6825 }
6826 
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* With BR/EDR enabled on an SC capable controller, SSP must be
	 * enabled before Secure Connections can be turned on.
	 */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	/* 0x00 = off, 0x01 = SC enabled, 0x02 = SC Only mode */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Without power, controller SC support or BR/EDR only the host
	 * flags are updated; no HCI traffic is required.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do when both SC and SC Only already match. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6907 
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* 0x00 = off, 0x01 = keep debug keys, 0x02 = keep and also
	 * generate debug keys.
	 */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Both 0x01 and 0x02 cause debug keys to be kept. */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 additionally enables use of debug keys. */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;

		/* Best-effort: a failure to toggle the controller's SSP
		 * debug mode is not reported back to the caller.
		 */
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6954 
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* 0x00 = off, 0x01 = privacy, 0x02 = limited privacy */
	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Privacy can only be reconfigured while powered off. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7011 
irk_is_valid(struct mgmt_irk_info * irk)7012 static bool irk_is_valid(struct mgmt_irk_info *irk)
7013 {
7014 	switch (irk->addr.type) {
7015 	case BDADDR_LE_PUBLIC:
7016 		return true;
7017 
7018 	case BDADDR_LE_RANDOM:
7019 		/* Two most significant bits shall be set */
7020 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7021 			return false;
7022 		return true;
7023 	}
7024 
7025 	return false;
7026 }
7027 
/* Handle MGMT_OP_LOAD_IRKS: replace the complete set of stored Identity
 * Resolving Keys with the list supplied by user space.
 *
 * All entries are validated up front, so one bad entry rejects the whole
 * command before any stored key is touched.  Successfully loading a list
 * (even an empty one) signals that user space handles IRKs, so the
 * HCI_RPA_RESOLVING flag is set.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Largest count for which struct_size() still fits in u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before committing anything */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* This command is a full replacement, not a merge */
	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type = le_addr_type(irk->addr.type);

		/* Block-listed keys are skipped, not treated as an error */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		/* NOTE(review): irk_is_valid() above rejects BDADDR_BREDR, so
		 * this branch appears unreachable from this command path —
		 * presumably groundwork for SMP over BR/EDR; confirm.
		 */
		if (irk->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* User space handed us IRKs, so it can resolve RPAs */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7103 
ltk_is_valid(struct mgmt_ltk_info * key)7104 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7105 {
7106 	if (key->initiator != 0x00 && key->initiator != 0x01)
7107 		return false;
7108 
7109 	switch (key->addr.type) {
7110 	case BDADDR_LE_PUBLIC:
7111 		return true;
7112 
7113 	case BDADDR_LE_RANDOM:
7114 		/* Two most significant bits shall be set */
7115 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7116 			return false;
7117 		return true;
7118 	}
7119 
7120 	return false;
7121 }
7122 
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: replace the complete set of stored
 * SMP Long Term Keys with the list supplied by user space.
 *
 * All entries are validated first, so a single invalid entry rejects the
 * whole command before any stored key is cleared.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Largest count for which struct_size() still fits in u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry before committing anything */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* This command is a full replacement, not a merge */
	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;
		u8 addr_type = le_addr_type(key->addr.type);

		/* Block-listed keys are skipped, not treated as an error */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type onto SMP key type + auth level */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			fallthrough;
		default:
			/* Debug keys and unknown types are never stored:
			 * the fallthrough above lands here and the entry
			 * is skipped.
			 */
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		/* NOTE(review): ltk_is_valid() above rejects BDADDR_BREDR, so
		 * this branch appears unreachable from this command path —
		 * presumably groundwork for SMP over BR/EDR; confirm.
		 */
		if (key->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    addr_type, type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7223 
get_conn_info_complete(struct hci_dev * hdev,void * data,int err)7224 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7225 {
7226 	struct mgmt_pending_cmd *cmd = data;
7227 	struct hci_conn *conn = cmd->user_data;
7228 	struct mgmt_cp_get_conn_info *cp = cmd->param;
7229 	struct mgmt_rp_get_conn_info rp;
7230 	u8 status;
7231 
7232 	bt_dev_dbg(hdev, "err %d", err);
7233 
7234 	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7235 
7236 	status = mgmt_status(err);
7237 	if (status == MGMT_STATUS_SUCCESS) {
7238 		rp.rssi = conn->rssi;
7239 		rp.tx_power = conn->tx_power;
7240 		rp.max_tx_power = conn->max_tx_power;
7241 	} else {
7242 		rp.rssi = HCI_RSSI_INVALID;
7243 		rp.tx_power = HCI_TX_POWER_INVALID;
7244 		rp.max_tx_power = HCI_TX_POWER_INVALID;
7245 	}
7246 
7247 	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7248 			  &rp, sizeof(rp));
7249 
7250 	mgmt_pending_free(cmd);
7251 }
7252 
get_conn_info_sync(struct hci_dev * hdev,void * data)7253 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7254 {
7255 	struct mgmt_pending_cmd *cmd = data;
7256 	struct mgmt_cp_get_conn_info *cp = cmd->param;
7257 	struct hci_conn *conn;
7258 	int err;
7259 	__le16   handle;
7260 
7261 	/* Make sure we are still connected */
7262 	if (cp->addr.type == BDADDR_BREDR)
7263 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7264 					       &cp->addr.bdaddr);
7265 	else
7266 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7267 
7268 	if (!conn || conn->state != BT_CONNECTED)
7269 		return MGMT_STATUS_NOT_CONNECTED;
7270 
7271 	cmd->user_data = conn;
7272 	handle = cpu_to_le16(conn->handle);
7273 
7274 	/* Refresh RSSI each time */
7275 	err = hci_read_rssi_sync(hdev, handle);
7276 
7277 	/* For LE links TX power does not change thus we don't need to
7278 	 * query for it once value is known.
7279 	 */
7280 	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7281 		     conn->tx_power == HCI_TX_POWER_INVALID))
7282 		err = hci_read_tx_power_sync(hdev, handle, 0x00);
7283 
7284 	/* Max TX power needs to be read only once per connection */
7285 	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7286 		err = hci_read_tx_power_sync(hdev, handle, 0x01);
7287 
7288 	return err;
7289 }
7290 
/* Handle MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an active
 * connection.
 *
 * Cached values on the hci_conn are returned directly when fresh enough;
 * otherwise a sync request is queued to re-read them from the controller
 * and the reply is sent from get_conn_info_complete().  The reply always
 * echoes the requested address, even on error.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look the connection up on the link type implied by the address */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		/* The reply is deferred to get_conn_info_complete(), which
		 * also frees the pending command.
		 */
		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7381 
get_clock_info_complete(struct hci_dev * hdev,void * data,int err)7382 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7383 {
7384 	struct mgmt_pending_cmd *cmd = data;
7385 	struct mgmt_cp_get_clock_info *cp = cmd->param;
7386 	struct mgmt_rp_get_clock_info rp;
7387 	struct hci_conn *conn = cmd->user_data;
7388 	u8 status = mgmt_status(err);
7389 
7390 	bt_dev_dbg(hdev, "err %d", err);
7391 
7392 	memset(&rp, 0, sizeof(rp));
7393 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7394 	rp.addr.type = cp->addr.type;
7395 
7396 	if (err)
7397 		goto complete;
7398 
7399 	rp.local_clock = cpu_to_le32(hdev->clock);
7400 
7401 	if (conn) {
7402 		rp.piconet_clock = cpu_to_le32(conn->clock);
7403 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7404 	}
7405 
7406 complete:
7407 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7408 			  sizeof(rp));
7409 
7410 	mgmt_pending_free(cmd);
7411 }
7412 
/* cmd_sync work for MGMT_OP_GET_CLOCK_INFO: read the local clock and,
 * if the target connection is still up, its piconet clock.  The result
 * is consumed by get_clock_info_complete().
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* First read: handle 0 / which 0x00 (local clock).  Its return
	 * value is ignored here — presumably a best-effort read, with the
	 * piconet read below deciding the command status; confirm.
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for the completion callback */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7434 
/* Handle MGMT_OP_GET_CLOCK_INFO: report the local clock and optionally
 * the piconet clock of a BR/EDR connection (BDADDR_ANY means local
 * clock only).  The actual reads run from get_clock_info_sync() and the
 * reply is sent by get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address must name an active ACL connection */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	/* The reply is deferred to get_clock_info_complete(), which also
	 * frees the pending command.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7498 
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)7499 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7500 {
7501 	struct hci_conn *conn;
7502 
7503 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7504 	if (!conn)
7505 		return false;
7506 
7507 	if (conn->dst_type != type)
7508 		return false;
7509 
7510 	if (conn->state != BT_CONNECTED)
7511 		return false;
7512 
7513 	return true;
7514 }
7515 
7516 /* This function requires the caller holds hdev->lock */
hci_conn_params_set(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type,u8 auto_connect)7517 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7518 			       u8 addr_type, u8 auto_connect)
7519 {
7520 	struct hci_conn_params *params;
7521 
7522 	params = hci_conn_params_add(hdev, addr, addr_type);
7523 	if (!params)
7524 		return -EIO;
7525 
7526 	if (params->auto_connect == auto_connect)
7527 		return 0;
7528 
7529 	hci_pend_le_list_del_init(params);
7530 
7531 	switch (auto_connect) {
7532 	case HCI_AUTO_CONN_DISABLED:
7533 	case HCI_AUTO_CONN_LINK_LOSS:
7534 		/* If auto connect is being disabled when we're trying to
7535 		 * connect to device, keep connecting.
7536 		 */
7537 		if (params->explicit_connect)
7538 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7539 		break;
7540 	case HCI_AUTO_CONN_REPORT:
7541 		if (params->explicit_connect)
7542 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7543 		else
7544 			hci_pend_le_list_add(params, &hdev->pend_le_reports);
7545 		break;
7546 	case HCI_AUTO_CONN_DIRECT:
7547 	case HCI_AUTO_CONN_ALWAYS:
7548 		if (!is_connected(hdev, addr, addr_type))
7549 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7550 		break;
7551 	}
7552 
7553 	params->auto_connect = auto_connect;
7554 
7555 	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7556 		   addr, addr_type, auto_connect);
7557 
7558 	return 0;
7559 }
7560 
/* Emit the Device Added mgmt event for the given address/action,
 * skipping the socket that issued the command (sk).
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev = {
		.addr.type = type,
		.action = action,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
7572 
/* cmd_sync work for Add Device: re-sync the passive scan so the updated
 * connection parameter lists take effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7577 
/* Handle MGMT_OP_ADD_DEVICE: put a BR/EDR device on the accept list, or
 * configure LE auto-connect behaviour for an LE identity address.
 *
 * cp->action: 0x00 = background scan / report, 0x01 = allow incoming
 * (BR/EDR) or direct connect (LE), 0x02 = auto-connect always (LE only).
 * The reply echoes the device address on both success and failure.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Pick up the flags of the (possibly just created) entry so
		 * they can be reported in the Device Flags Changed event.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7679 
/* Emit the Device Removed mgmt event for the given address, skipping
 * the socket that issued the command (sk).
 */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev = {
		.addr.type = type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
7690 
/* cmd_sync work for Remove Device: re-sync the passive scan so the
 * updated connection parameter lists take effect.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7695 
/* Handle MGMT_OP_REMOVE_DEVICE: undo an earlier Add Device.
 *
 * A specific address removes that one entry (accept list for BR/EDR,
 * conn params for LE); BDADDR_ANY with type 0 wipes the whole accept
 * list and every non-disabled LE connection parameter entry.  Entries
 * with an in-flight explicit connect are kept but demoted to
 * HCI_AUTO_CONN_EXPLICIT so the attempt can finish.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Single-device removal */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled/explicit entries were not created via Add Device,
		 * so they cannot be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove everything.  Only type 0 is valid. */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a pending explicit connect so
			 * the attempt can still complete.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7819 
/* Handle MGMT_OP_LOAD_CONN_PARAM: replace the set of stored LE
 * connection parameters with the list supplied by user space.
 *
 * Unlike the key-loading commands, invalid entries here are logged and
 * skipped rather than failing the whole command; only a malformed
 * packet (bad count/length) is rejected.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest count for which struct_size() still fits in u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Drop stale disabled entries before applying the new set */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		/* Range-check against the values allowed by the LE spec */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Returns the existing entry if one is already stored */
		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7904 
/* Handle MGMT_OP_SET_EXTERNAL_CONFIG: record whether the controller's
 * configuration is provided by an external entity.
 *
 * Only valid while powered off and when the controller declares the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk.  Toggling the option may move the
 * controller between the configured and unconfigured index lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	/* The option is a boolean */
	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the HCI_UNCONFIGURED flag no longer matches the actual
	 * configured state, re-register the controller on the other index
	 * list: either start power-on in config mode (now unconfigured) or
	 * fall back to raw mode (now configured).
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7960 
/* Handle MGMT_OP_SET_PUBLIC_ADDRESS: configure the public address a
 * controller without a valid one should use.
 *
 * Only valid while powered off, with a non-ANY address, and when the
 * driver provides a set_bdaddr callback.  If the address makes the
 * controller configured, start power-on in config mode.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* The new address may have made the controller configured: move it
	 * to the configured index list and kick off power-on in config
	 * mode so the address gets programmed.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8012 
/* Completion handler for Read Local OOB Extended Data.
 *
 * Runs when the queued HCI Read Local OOB (Extended) Data command has
 * finished. Translates the controller reply in cmd->skb into a
 * management reply carrying the OOB hash/randomizer values as EIR
 * fields, sends it to the requesting socket, and on success also
 * broadcasts a Local OOB Data Updated event to other subscribed
 * sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if the command is no longer on the pending list */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* If the sync request itself succeeded, derive the status from
	 * the reply skb (first byte is the HCI status).
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status was already mapped through
		 * mgmt_status() above, so this second mapping looks
		 * redundant — confirm before changing.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy (non Secure Connections) reply: P-192 only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-device field + two 18 byte EIR fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections reply: P-256 plus optional P-192 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only mode suppresses the P-192 values */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* eir_len == 0 means an error reply with no EIR payload */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Notify all other sockets subscribed to OOB data updates */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8135 
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)8136 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8137 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8138 {
8139 	struct mgmt_pending_cmd *cmd;
8140 	int err;
8141 
8142 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8143 			       cp, sizeof(*cp));
8144 	if (!cmd)
8145 		return -ENOMEM;
8146 
8147 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8148 				 read_local_oob_ext_data_complete);
8149 
8150 	if (err < 0) {
8151 		mgmt_pending_remove(cmd);
8152 		return err;
8153 	}
8154 
8155 	return 0;
8156 }
8157 
/* Handle the Read Local OOB Extended Data management command.
 *
 * For BR/EDR this may queue an HCI request and defer the reply to
 * read_local_oob_ext_data_complete(); for LE (and error cases) the
 * reply is built and sent directly from here. The reply buffer is
 * sized from the worst-case eir_len estimated before the type-specific
 * processing below.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: validate the requested type and compute the
	 * worst-case EIR length for sizing the reply buffer.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: fill the EIR payload for the validated type */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Reply is deferred to the HCI completion handler */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick static (0x01) vs public (0x00) address, matching
		 * the address selection used for actual LE advertising.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8318 
get_supported_adv_flags(struct hci_dev * hdev)8319 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8320 {
8321 	u32 flags = 0;
8322 
8323 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8324 	flags |= MGMT_ADV_FLAG_DISCOV;
8325 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8326 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8327 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8328 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8329 	flags |= MGMT_ADV_PARAM_DURATION;
8330 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8331 	flags |= MGMT_ADV_PARAM_INTERVALS;
8332 	flags |= MGMT_ADV_PARAM_TX_POWER;
8333 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8334 
8335 	/* In extended adv TX_POWER returned from Set Adv Param
8336 	 * will be always valid.
8337 	 */
8338 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8339 		flags |= MGMT_ADV_FLAG_TX_POWER;
8340 
8341 	if (ext_adv_capable(hdev)) {
8342 		flags |= MGMT_ADV_FLAG_SEC_1M;
8343 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8344 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8345 
8346 		if (le_2m_capable(hdev))
8347 			flags |= MGMT_ADV_FLAG_SEC_2M;
8348 
8349 		if (le_coded_capable(hdev))
8350 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8351 	}
8352 
8353 	return flags;
8354 }
8355 
/* Handle the Read Advertising Features management command.
 *
 * Reports the supported advertising flags, size limits and the list of
 * currently registered advertising instance numbers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One byte of instance number per registered instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible.
		 * NOTE(review): the code compares against adv_instance_cnt,
		 * not le_num_of_adv_sets as the comment says — confirm which
		 * bound is intended.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink the reply accordingly */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8410 
calculate_name_len(struct hci_dev * hdev)8411 static u8 calculate_name_len(struct hci_dev *hdev)
8412 {
8413 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8414 
8415 	return eir_append_local_name(hdev, buf, 0);
8416 }
8417 
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)8418 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8419 			   bool is_adv_data)
8420 {
8421 	u8 max_len = max_adv_len(hdev);
8422 
8423 	if (is_adv_data) {
8424 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8425 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8426 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8427 			max_len -= 3;
8428 
8429 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8430 			max_len -= 3;
8431 	} else {
8432 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8433 			max_len -= calculate_name_len(hdev);
8434 
8435 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8436 			max_len -= 4;
8437 	}
8438 
8439 	return max_len;
8440 }
8441 
flags_managed(u32 adv_flags)8442 static bool flags_managed(u32 adv_flags)
8443 {
8444 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8445 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8446 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8447 }
8448 
tx_power_managed(u32 adv_flags)8449 static bool tx_power_managed(u32 adv_flags)
8450 {
8451 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8452 }
8453 
name_managed(u32 adv_flags)8454 static bool name_managed(u32 adv_flags)
8455 {
8456 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8457 }
8458 
appearance_managed(u32 adv_flags)8459 static bool appearance_managed(u32 adv_flags)
8460 {
8461 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8462 }
8463 
/* Validate user-supplied advertising or scan response TLV data.
 *
 * Checks that the data fits within the controller limit (after
 * accounting for kernel-managed fields) and that it is a well-formed
 * sequence of length-prefixed EIR fields which does not include any
 * field the kernel manages itself for the given flags.
 *
 * Returns true when the data may be accepted.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		/* If the current field length would exceed the total data
		 * length, then it's invalid. This must be checked before
		 * reading data[i + 1]: a truncated final field would
		 * otherwise cause a one byte out-of-bounds read of the
		 * type octet.
		 */
		if (i + cur_len >= len)
			return false;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;
	}

	return true;
}
8508 
requested_adv_flags_are_valid(struct hci_dev * hdev,u32 adv_flags)8509 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8510 {
8511 	u32 supported_flags, phy_flags;
8512 
8513 	/* The current implementation only supports a subset of the specified
8514 	 * flags. Also need to check mutual exclusiveness of sec flags.
8515 	 */
8516 	supported_flags = get_supported_adv_flags(hdev);
8517 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8518 	if (adv_flags & ~supported_flags ||
8519 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8520 		return false;
8521 
8522 	return true;
8523 }
8524 
adv_busy(struct hci_dev * hdev)8525 static bool adv_busy(struct hci_dev *hdev)
8526 {
8527 	return pending_find(MGMT_OP_SET_LE, hdev);
8528 }
8529 
/* Resolve the pending state of advertising instances after an add
 * operation has completed.
 *
 * On success all pending instances are marked as committed; on failure
 * every still-pending instance is removed again and its removal is
 * signalled to userspace.
 */
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		/* Local copy of the instance being removed; previously this
		 * shadowed the 'instance' parameter (-Wshadow).
		 */
		u8 rem_inst;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		rem_inst = adv->instance;

		/* Stop the rotation timer if it points at this instance */
		if (hdev->cur_adv_instance == rem_inst)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, rem_inst);
		mgmt_advertising_removed(sk, hdev, rem_inst);
	}

	hci_dev_unlock(hdev);
}
8561 
add_advertising_complete(struct hci_dev * hdev,void * data,int err)8562 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8563 {
8564 	struct mgmt_pending_cmd *cmd = data;
8565 	struct mgmt_cp_add_advertising *cp = cmd->param;
8566 	struct mgmt_rp_add_advertising rp;
8567 
8568 	memset(&rp, 0, sizeof(rp));
8569 
8570 	rp.instance = cp->instance;
8571 
8572 	if (err)
8573 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8574 				mgmt_status(err));
8575 	else
8576 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8577 				  mgmt_status(err), &rp, sizeof(rp));
8578 
8579 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8580 
8581 	mgmt_pending_free(cmd);
8582 }
8583 
add_advertising_sync(struct hci_dev * hdev,void * data)8584 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8585 {
8586 	struct mgmt_pending_cmd *cmd = data;
8587 	struct mgmt_cp_add_advertising *cp = cmd->param;
8588 
8589 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8590 }
8591 
/* Handle the Add Advertising management command.
 *
 * Registers (or overwrites) advertising instance cp->instance with the
 * supplied flags, advertising data and scan response data, then picks
 * which instance to actually schedule. When HCI communication is
 * needed the reply is deferred to add_advertising_complete(),
 * otherwise it is sent directly from here.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Valid instance numbers are 1..le_num_of_adv_sets */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable part must match the declared data lengths */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Reply with the instance that will actually be scheduled */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8726 
/* Completion handler for Add Extended Advertising Parameters.
 *
 * Sends the management reply once the parameters have been programmed.
 * On failure the freshly configured instance is removed again, and if
 * it was previously advertising its removal is signalled to userspace.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	/* NOTE(review): cmd was already dereferenced above, so this NULL
	 * check can never be false here — harmless but redundant.
	 */
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8777 
add_ext_adv_params_sync(struct hci_dev * hdev,void * data)8778 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8779 {
8780 	struct mgmt_pending_cmd *cmd = data;
8781 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8782 
8783 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8784 }
8785 
/* Handle the Add Extended Advertising Parameters management command.
 *
 * First half of the two-stage extended advertising setup: creates an
 * advertising instance with the requested parameters but no data. The
 * data is supplied later through Add Extended Advertising Data. With
 * extended advertising support the parameters are programmed via a
 * queued HCI request and the reply is deferred to
 * add_ext_adv_params_complete(); otherwise the reply is sent directly.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Valid instance numbers are 1..le_num_of_adv_sets */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Undo the instance created above */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: nothing to program yet, reply now */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8901 
add_ext_adv_data_complete(struct hci_dev * hdev,void * data,int err)8902 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8903 {
8904 	struct mgmt_pending_cmd *cmd = data;
8905 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8906 	struct mgmt_rp_add_advertising rp;
8907 
8908 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8909 
8910 	memset(&rp, 0, sizeof(rp));
8911 
8912 	rp.instance = cp->instance;
8913 
8914 	if (err)
8915 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8916 				mgmt_status(err));
8917 	else
8918 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8919 				  mgmt_status(err), &rp, sizeof(rp));
8920 
8921 	mgmt_pending_free(cmd);
8922 }
8923 
add_ext_adv_data_sync(struct hci_dev * hdev,void * data)8924 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8925 {
8926 	struct mgmt_pending_cmd *cmd = data;
8927 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8928 	int err;
8929 
8930 	if (ext_adv_capable(hdev)) {
8931 		err = hci_update_adv_data_sync(hdev, cp->instance);
8932 		if (err)
8933 			return err;
8934 
8935 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8936 		if (err)
8937 			return err;
8938 
8939 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8940 	}
8941 
8942 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8943 }
8944 
/* Handle MGMT_OP_ADD_EXT_ADV_DATA: attach advertising and scan response
 * data to an instance previously created with Add Ext Adv Params, then
 * either complete immediately or queue the HCI updates.
 *
 * On any failure after the instance was found (other than "instance not
 * found"), the instance itself is removed again via clear_new_instance,
 * matching the two-step params+data setup contract.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must already exist (created by Add Ext Adv Params) */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data: cp->data carries the advertising data followed
	 * directly by the scan response data.
	 */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	/* Tear the half-configured instance down again on failure */
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9063 
remove_advertising_complete(struct hci_dev * hdev,void * data,int err)9064 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9065 					int err)
9066 {
9067 	struct mgmt_pending_cmd *cmd = data;
9068 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9069 	struct mgmt_rp_remove_advertising rp;
9070 
9071 	bt_dev_dbg(hdev, "err %d", err);
9072 
9073 	memset(&rp, 0, sizeof(rp));
9074 	rp.instance = cp->instance;
9075 
9076 	if (err)
9077 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9078 				mgmt_status(err));
9079 	else
9080 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9081 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9082 
9083 	mgmt_pending_free(cmd);
9084 }
9085 
remove_advertising_sync(struct hci_dev * hdev,void * data)9086 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9087 {
9088 	struct mgmt_pending_cmd *cmd = data;
9089 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9090 	int err;
9091 
9092 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9093 	if (err)
9094 		return err;
9095 
9096 	if (list_empty(&hdev->adv_instances))
9097 		err = hci_disable_advertising_sync(hdev);
9098 
9099 	return err;
9100 }
9101 
/* Handle MGMT_OP_REMOVE_ADVERTISING: validate the request under the dev
 * lock and queue the actual removal onto the cmd_sync machinery; the
 * response is sent from remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Instance 0 means "remove all"; a non-zero instance must exist */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Don't race with an in-flight Set LE command */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered, so nothing to remove */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9149 
/* Handle MGMT_OP_GET_ADV_SIZE_INFO: report the maximum advertising and
 * scan response payload sizes available for a given instance and flag
 * combination. Purely informational; takes no locks and changes no
 * state.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 adv_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Advertising requires LE support on the controller */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	/* Instance numbers are 1-based and bounded by the controller */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	adv_flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the
	 * specified flags; reject anything outside it.
	 */
	if (adv_flags & ~get_supported_adv_flags(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, adv_flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, adv_flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
9185 
/* Dispatch table for MGMT commands. Entries are positional: the index
 * of an entry is its command opcode (entry 0 is a placeholder since
 * opcode 0x0000 is unused), so new commands must be appended in opcode
 * order. The size field is the expected parameter length — with
 * HCI_MGMT_VAR_LEN it is treated as a minimum, presumably to allow
 * trailing variable-length data (verify against the socket-layer
 * dispatcher).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9319 
/* Notify userspace that a controller index appeared. Sends the legacy
 * (Unconf) Index Added event for primary controllers and always follows
 * up with Extended Index Added carrying a type byte:
 * 0x00 = configured primary, 0x01 = unconfigured primary, 0x02 = AMP.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are not exposed through the management interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		/* Unknown device types are not announced */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
9351 
/* Notify userspace that a controller index disappeared and flush any
 * still-pending management commands for it with Invalid Index status.
 * Mirrors mgmt_index_added(): legacy event first for primary
 * controllers, then Extended Index Removed with the same type encoding
 * (0x00 configured, 0x01 unconfigured, 0x02 AMP).
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Fail all pending commands (opcode 0 = match any) */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}
9393 
/* Called once power-on has finished (err reflects the outcome).
 * On success, re-arm stored LE actions and passive scanning, then
 * complete any pending Set Powered commands and broadcast the new
 * settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	/* match.sk collects the socket of a completed pending command so
	 * that the New Settings event can skip echoing back to it.
	 */
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the socket it recorded */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9416 
/* Power-off bookkeeping: complete pending Set Powered commands, fail
 * every other pending command with an appropriate status, reset the
 * advertised class of device, and broadcast the new settings.
 * Caller is expected to hold the necessary locking context (note the
 * double underscore prefix).
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Announce the cleared class of device only if it was non-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9450 
mgmt_set_powered_failed(struct hci_dev * hdev,int err)9451 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9452 {
9453 	struct mgmt_pending_cmd *cmd;
9454 	u8 status;
9455 
9456 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9457 	if (!cmd)
9458 		return;
9459 
9460 	if (err == -ERFKILL)
9461 		status = MGMT_STATUS_RFKILLED;
9462 	else
9463 		status = MGMT_STATUS_FAILED;
9464 
9465 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9466 
9467 	mgmt_pending_remove(cmd);
9468 }
9469 
/* Emit a New Link Key event for a BR/EDR link key, letting userspace
 * know whether it should persist the key.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Address and key material */
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	/* Tell userspace whether this key is worth storing */
	ev.store_hint = persistent ? 0x01 : 0x00;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
9486 
mgmt_ltk_type(struct smp_ltk * ltk)9487 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9488 {
9489 	switch (ltk->type) {
9490 	case SMP_LTK:
9491 	case SMP_LTK_RESPONDER:
9492 		if (ltk->authenticated)
9493 			return MGMT_LTK_AUTHENTICATED;
9494 		return MGMT_LTK_UNAUTHENTICATED;
9495 	case SMP_LTK_P256:
9496 		if (ltk->authenticated)
9497 			return MGMT_LTK_P256_AUTH;
9498 		return MGMT_LTK_P256_UNAUTH;
9499 	case SMP_LTK_P256_DEBUG:
9500 		return MGMT_LTK_P256_DEBUG;
9501 	}
9502 
9503 	return MGMT_LTK_UNAUTHENTICATED;
9504 }
9505 
/* Emit a New Long Term Key event. The store hint is suppressed for
 * non-identity random addresses since such keys become useless once the
 * address rotates.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key generated while acting as initiator */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9548 
/* Emit a New IRK event carrying the identity resolving key together
 * with the resolvable private address it was seen under.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* Identity address, current RPA and the key itself */
	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	ev.store_hint = persistent;

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
9564 
/* Emit a New CSRK event. As with LTKs, the store hint is suppressed
 * for non-identity random addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9594 
/* Emit a New Connection Parameter event for an identity address.
 * Parameters from non-identity (rotating) addresses are dropped since
 * userspace could not meaningfully persist them.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;

	/* Connection parameters are little-endian on the wire */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9615 
/* Emit a Device Connected event, at most once per connection. For LE
 * connections the advertising data is attached as EIR; for BR/EDR the
 * remote name (if known) and class of device are attached instead.
 *
 * Fix: the result of mgmt_alloc_skb() was passed straight to skb_put()
 * without a NULL check, so an allocation failure under memory pressure
 * would dereference a NULL pointer. Drop the event instead.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	/* Only send the event once per connection */
	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* allocate buff for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	/* Allocation can fail under memory pressure; drop the event
	 * rather than crashing in skb_put() below.
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		/* Only include the class of device if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}
9665 
disconnect_rsp(struct mgmt_pending_cmd * cmd,void * data)9666 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9667 {
9668 	struct sock **sk = data;
9669 
9670 	cmd->cmd_complete(cmd, 0);
9671 
9672 	*sk = cmd->sk;
9673 	sock_hold(*sk);
9674 
9675 	mgmt_pending_remove(cmd);
9676 }
9677 
/* mgmt_pending_foreach() callback: finish a pending Unpair Device
 * command — emit Device Unpaired to the other sockets first, then
 * complete and free the command.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9688 
mgmt_powering_down(struct hci_dev * hdev)9689 bool mgmt_powering_down(struct hci_dev *hdev)
9690 {
9691 	struct mgmt_pending_cmd *cmd;
9692 	struct mgmt_mode *cp;
9693 
9694 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9695 	if (!cmd)
9696 		return false;
9697 
9698 	cp = cmd->param;
9699 	if (!cp->val)
9700 		return true;
9701 
9702 	return false;
9703 }
9704 
/* Emit a Device Disconnected event for an ACL or LE link, completing
 * any pending Disconnect command (whose socket then doesn't receive the
 * broadcast) and any pending Unpair Device commands.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* Only report links that were announced as connected */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Completes the pending Disconnect and hands back its socket
	 * with a held reference (see disconnect_rsp).
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9736 
/* Handle a failed HCI disconnect: flush pending Unpair Device commands
 * and, if a pending Disconnect command matches this address, complete
 * it with the translated HCI status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command that targets this exact address */
	if (bacmp(bdaddr, &cp->addr.bdaddr) || cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9762 
/* Broadcast a Connect Failed event with the HCI status translated to a
 * management status code.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9774 
/* Broadcast a PIN Code Request event (BR/EDR only); 'secure' indicates
 * whether a 16-digit PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
9785 
/* Complete a pending PIN Code Reply command with the translated HCI
 * status; a no-op when no such command is pending.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *pending;

	pending = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!pending)
		return;

	pending->cmd_complete(pending, mgmt_status(status));
	mgmt_pending_remove(pending);
}
9798 
/* Complete a pending PIN Code Negative Reply command with the
 * translated HCI status; a no-op when no such command is pending.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *pending;

	pending = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!pending)
		return;

	pending->cmd_complete(pending, mgmt_status(status));
	mgmt_pending_remove(pending);
}
9811 
/* Broadcast a User Confirm Request event asking userspace to confirm
 * the displayed numeric comparison value.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	/* Value goes out little-endian */
	ev.value = cpu_to_le32(value);
	ev.confirm_hint = confirm_hint;

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9828 
/* Broadcast a User Passkey Request event asking userspace to supply a
 * passkey for pairing.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9842 
/* Shared completion helper for the four user-pairing reply opcodes:
 * complete the matching pending command with the translated HCI status.
 * Returns -ENOENT when no such command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *pending;

	pending = pending_find(opcode, hdev);
	if (!pending)
		return -ENOENT;

	pending->cmd_complete(pending, mgmt_status(status));
	mgmt_pending_remove(pending);

	return 0;
}
9858 
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9865 
/* Completion hook for the User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type,
					  addr_type, status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9873 
/* Completion hook for the User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type,
					  addr_type, status,
					  MGMT_OP_USER_PASSKEY_REPLY);
}
9880 
/* Completion hook for the User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type,
					  addr_type, status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9888 
/* Emit a Passkey Notify event so userspace can display the passkey and
 * track whether a digit has been entered on the remote side.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.entered = entered;
	ev.passkey = cpu_to_le32(passkey);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev),
			  NULL);
}
9904 
/* Report an authentication failure on a connection via the Auth Failed
 * event and, if a pairing command is pending for it, complete that
 * command with the same status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);
	u8 status = mgmt_status(hci_status);
	struct mgmt_ev_auth_failed ev;

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	/* Skip the socket that initiated the pairing, if any. */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);
}
9925 
/* Handle completion of the controller authentication-enable update
 * triggered by Set Link Security.  On error, fail every pending
 * SET_LINK_SECURITY command with the translated status.  On success,
 * mirror the controller's HCI_AUTH state into the HCI_LINK_SECURITY
 * setting, answer pending commands with the current settings and, if
 * the setting actually changed, broadcast New Settings.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* 'changed' is true only when the flag actually flipped. */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Release the socket reference held in match, if one was taken. */
	if (match.sk)
		sock_put(match.sk);
}
9952 
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)9953 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9954 {
9955 	struct cmd_lookup *match = data;
9956 
9957 	if (match->sk == NULL) {
9958 		match->sk = cmd->sk;
9959 		sock_hold(match->sk);
9960 	}
9961 }
9962 
/* Handle completion of a Class of Device update: locate the socket of
 * whichever pending command caused it and, on success, broadcast the
 * new class to management listeners.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these commands may have triggered the class change. */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (status == 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9981 
/* Handle completion of a local name update on the controller.  Emits a
 * Local Name Changed event unless the update happened as part of a
 * power-on sequence, in which case no signal is sent.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No userspace-initiated change pending: store the name
		 * that the controller reported.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10009 
has_uuid(u8 * uuid,u16 uuid_count,u8 (* uuids)[16])10010 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10011 {
10012 	int i;
10013 
10014 	for (i = 0; i < uuid_count; i++) {
10015 		if (!memcmp(uuid, uuids[i], 16))
10016 			return true;
10017 	}
10018 
10019 	return false;
10020 }
10021 
/* Check whether an EIR/advertising data blob contains at least one of
 * the given 128-bit UUIDs.  16- and 32-bit UUID fields are expanded to
 * full 128-bit form using the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length byte terminates the EIR data. */
		if (field_len == 0)
			break;

		/* Stop on a truncated field rather than reading past the
		 * end of the buffer (each field is length byte + payload).
		 */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUIDs map onto bytes 12-13 of the base
			 * UUID; payload is little endian on the wire.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs map onto bytes 12-15 of the base. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are compared directly. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance to the next field. */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10076 
/* Schedule a restart of the ongoing LE scan (used when strict duplicate
 * filtering would otherwise suppress updated results).
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart if it would only fire after the discovery
	 * window (scan_start + scan_duration) has already ended.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
10091 
/* Apply the service-discovery RSSI and UUID filters to one scan result.
 * Returns true if the result passes the filters and should be reported
 * to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10136 
/* Notify userspace that a device previously matched by the given
 * advertisement monitor is no longer being seen.
 */
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;
	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}
10149 
/* Build and send an ADV_MONITOR_DEVICE_FOUND event from an existing
 * DEVICE_FOUND skb.  The event layout is the matched monitor handle
 * followed by a verbatim copy of the DEVICE_FOUND payload.  The input
 * skb is left untouched (the caller still owns it).
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *ev_skb;
	__le16 *handle_ptr;
	size_t ev_len;

	if (!skb)
		return;

	ev_len = skb->len +
		 sizeof(struct mgmt_ev_adv_monitor_device_found) -
		 sizeof(struct mgmt_ev_device_found);

	ev_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				ev_len);
	if (!ev_skb)
		return;

	handle_ptr = skb_put(ev_skb, sizeof(*handle_ptr));
	*handle_ptr = cpu_to_le16(handle);
	skb_put_data(ev_skb, skb->data, skb->len);

	mgmt_event_skb(ev_skb, skip_sk);
}
10179 
/* Route a DEVICE_FOUND skb to the right event(s) depending on whether
 * discovery/passive scanning is active and whether an advertisement
 * monitor matched.  Consumes 'skb': it is either handed off via
 * mgmt_event_skb() or freed before returning.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below: set again if any monitored device remains
	 * un-notified after this pass.
	 */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* Only one ADV_MONITOR_DEVICE_FOUND per device. */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* The skb must be either forwarded or freed exactly once. */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10243 
/* Report a scan result to mesh listeners as a MESH_DEVICE_FOUND event.
 * If a filter list of AD types is configured (mesh_ad_types[0] != 0),
 * the advertisement or scan response must contain at least one of the
 * requested AD types; otherwise the report is silently dropped.
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* No filter configured: accept everything. */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* A zero entry ends the filter list. */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: drop the report. */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10309 
/* Report a discovered device to userspace as a DEVICE_FOUND event,
 * applying the active discovery filters (RSSI, UUID, limited
 * discoverable) first.  LE reports are additionally forwarded to mesh
 * listeners when HCI_MESH is set.  The built skb is handed off to
 * mgmt_adv_monitor_device_found(), which consumes it.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a Class of Device EIR field when the EIR data does
	 * not already carry one but a dev_class was supplied.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10401 
/* Report the result of a remote name request as a DEVICE_FOUND event.
 * On success the name is appended as an EIR_NAME_COMPLETE field; on
 * failure (name == NULL) the event carries the
 * MGMT_DEV_FOUND_NAME_REQUEST_FAILED flag instead.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
	/* Fix: mgmt_alloc_skb() can fail; the original dereferenced the
	 * result unconditionally in skb_put() below.
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}
10428 
/* Broadcast a Discovering event reflecting the current discovery type
 * and on/off state.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev = {
		.type = hdev->discovery.type,
		.discovering = discovering,
	};

	bt_dev_dbg(hdev, "discovering %u", discovering);

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
10441 
/* Broadcast a Controller Suspend event with the given suspend state. */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev = {
		.suspend_state = state,
	};

	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
10449 
/* Broadcast a Controller Resume event.  When a wake device address is
 * known it is included; otherwise the address field is zeroed.
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	memset(&ev.addr, 0, sizeof(ev.addr));
	ev.wake_reason = reason;

	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
10465 
/* Control channel registration: command dispatch table plus the per-hdev
 * initialization hook, registered with the HCI core in mgmt_init().
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10472 
/* Register the management control channel with the HCI core. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10477 
/* Unregister the management control channel on module teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10482 
mgmt_cleanup(struct sock * sk)10483 void mgmt_cleanup(struct sock *sk)
10484 {
10485 	struct mgmt_mesh_tx *mesh_tx;
10486 	struct hci_dev *hdev;
10487 
10488 	read_lock(&hci_dev_list_lock);
10489 
10490 	list_for_each_entry(hdev, &hci_dev_list, list) {
10491 		do {
10492 			mesh_tx = mgmt_mesh_next(hdev, sk);
10493 
10494 			if (mesh_tx)
10495 				mesh_send_complete(hdev, mesh_tx, true);
10496 		} while (mesh_tx);
10497 	}
10498 
10499 	read_unlock(&hci_dev_list_lock);
10500 }
10501