xref: /openbmc/linux/net/bluetooth/mgmt.c (revision 44ad3baf1cca483e418b6aadf2d3994f69e0f16a)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* Opcodes a trusted (privileged) management socket may issue; this list is
 * reported verbatim in the MGMT_OP_READ_COMMANDS response.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
137 
/* Events a trusted management socket may receive; this list is reported
 * after the command list in the MGMT_OP_READ_COMMANDS response.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184 
/* Subset of opcodes allowed on untrusted (non-privileged) sockets; all are
 * read-only information queries.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197 
/* Subset of events delivered to untrusted (non-privileged) sockets. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212 
213 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
214 
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217 
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code (0x00..0x3f); each entry is the
 * corresponding MGMT_STATUS_* value. Used by mgmt_status() for non-negative
 * inputs.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285 
mgmt_errno_status(int err)286 static u8 mgmt_errno_status(int err)
287 {
288 	switch (err) {
289 	case 0:
290 		return MGMT_STATUS_SUCCESS;
291 	case -EPERM:
292 		return MGMT_STATUS_REJECTED;
293 	case -EINVAL:
294 		return MGMT_STATUS_INVALID_PARAMS;
295 	case -EOPNOTSUPP:
296 		return MGMT_STATUS_NOT_SUPPORTED;
297 	case -EBUSY:
298 		return MGMT_STATUS_BUSY;
299 	case -ETIMEDOUT:
300 		return MGMT_STATUS_AUTH_FAILED;
301 	case -ENOMEM:
302 		return MGMT_STATUS_NO_RESOURCES;
303 	case -EISCONN:
304 		return MGMT_STATUS_ALREADY_CONNECTED;
305 	case -ENOTCONN:
306 		return MGMT_STATUS_DISCONNECTED;
307 	}
308 
309 	return MGMT_STATUS_FAILED;
310 }
311 
mgmt_status(int err)312 static u8 mgmt_status(int err)
313 {
314 	if (err < 0)
315 		return mgmt_errno_status(err);
316 
317 	if (err < ARRAY_SIZE(mgmt_status_table))
318 		return mgmt_status_table[err];
319 
320 	return MGMT_STATUS_FAILED;
321 }
322 
/* Broadcast an index-related event on the control channel with the given
 * trust flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL,
			       data, len, flag, NULL);
}
329 
/* Broadcast an event restricted to sockets carrying the given flag,
 * optionally skipping one socket (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL,
			       data, len, flag, skip_sk);
}
336 
/* Broadcast an event to all trusted control-channel sockets, optionally
 * skipping one socket.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL,
			       data, len, HCI_SOCK_TRUSTED, skip_sk);
}
343 
mgmt_event_skb(struct sk_buff * skb,struct sock * skip_sk)344 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
345 {
346 	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
347 				   skip_sk);
348 }
349 
/* Translate a MGMT address type into the HCI LE address type; anything that
 * is not public is treated as a random address.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return (mgmt_addr_type == BDADDR_LE_PUBLIC) ?
		ADDR_LE_DEV_PUBLIC : ADDR_LE_DEV_RANDOM;
}
357 
mgmt_fill_version_info(void * ver)358 void mgmt_fill_version_info(void *ver)
359 {
360 	struct mgmt_rp_read_version *rp = ver;
361 
362 	rp->version = MGMT_VERSION;
363 	rp->revision = cpu_to_le16(MGMT_REVISION);
364 }
365 
/* Handle MGMT_OP_READ_VERSION: reply with the interface version/revision.
 * This command is index-less (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version ver;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&ver);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &ver, sizeof(ver));
}
378 
/* Handle MGMT_OP_READ_COMMANDS: report the supported opcodes and events.
 * Trusted sockets see the full lists; untrusted sockets only the read-only
 * subset. Opcodes are serialized little-endian, commands first.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *cmds, *evts;
	u16 num_commands, num_events;
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pick the list pair matching the socket's trust level */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		cmds = mgmt_commands;
		evts = mgmt_events;
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		cmds = mgmt_untrusted_commands;
		evts = mgmt_untrusted_events;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Opcode area may be unaligned, hence put_unaligned_le16() */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(cmds[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(evts[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
430 
/* Handle MGMT_OP_READ_INDEX_LIST: report the indexes of all configured
 * controllers. Controllers still in setup/config, claimed by a user channel,
 * or marked raw-only are excluded. The first pass only sizes the allocation
 * (it may overcount); the second pass fills in the actual entries.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 num;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Upper bound for the allocation: every configured controller */
	num = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			num++;
	}

	rp_len = sizeof(*rp) + (2 * num);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	num = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers not yet usable or owned by a user channel */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[num++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(num);
	/* Shrink the reply to the entries actually emitted */
	rp_len = sizeof(*rp) + (2 * num);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
488 
/* Handle MGMT_OP_READ_UNCONF_INDEX_LIST: mirror of read_index_list() but
 * reporting only controllers flagged HCI_UNCONFIGURED. Same two-pass
 * size-then-fill pattern and the same exclusion rules.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 num;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Upper bound for the allocation: every unconfigured controller */
	num = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			num++;
	}

	rp_len = sizeof(*rp) + (2 * num);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	num = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers not yet usable or owned by a user channel */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[num++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(num);
	/* Shrink the reply to the entries actually emitted */
	rp_len = sizeof(*rp) + (2 * num);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546 
/* Handle MGMT_OP_READ_EXT_INDEX_LIST: report every usable controller with
 * its type (0x00 configured, 0x01 unconfigured) and bus. Calling this once
 * switches the socket from legacy index events to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 num = 0;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Upper bound for the allocation: every registered controller */
	list_for_each_entry(d, &hci_dev_list, list)
		num++;

	rp = kmalloc(struct_size(rp, entry, num), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	num = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers not yet usable or owned by a user channel */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		rp->entry[num].type =
			hci_dev_test_flag(d, HCI_UNCONFIGURED) ? 0x01 : 0x00;
		rp->entry[num].bus = d->bus;
		rp->entry[num++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(num);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, num));

	kfree(rp);

	return err;
}
612 
is_configured(struct hci_dev * hdev)613 static bool is_configured(struct hci_dev *hdev)
614 {
615 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
616 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 		return false;
618 
619 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
620 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
621 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
622 		return false;
623 
624 	return true;
625 }
626 
get_missing_options(struct hci_dev * hdev)627 static __le32 get_missing_options(struct hci_dev *hdev)
628 {
629 	u32 options = 0;
630 
631 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
632 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
633 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
634 
635 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
636 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
637 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
638 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
639 
640 	return cpu_to_le32(options);
641 }
642 
new_options(struct hci_dev * hdev,struct sock * skip)643 static int new_options(struct hci_dev *hdev, struct sock *skip)
644 {
645 	__le32 options = get_missing_options(hdev);
646 
647 	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
648 				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
649 }
650 
/* Complete a configuration command with the current missing-options mask
 * as the response payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 missing = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &missing,
				 sizeof(missing));
}
658 
/* Handle MGMT_OP_READ_CONFIG_INFO: report the manufacturer plus which
 * configuration options this controller supports and which are still
 * missing. Supported options depend on the external-config quirk and on
 * whether the driver provides a set_bdaddr hook.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 supported = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		supported |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		supported |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
	rp.supported_options = cpu_to_le32(supported);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
686 
get_supported_phys(struct hci_dev * hdev)687 static u32 get_supported_phys(struct hci_dev *hdev)
688 {
689 	u32 supported_phys = 0;
690 
691 	if (lmp_bredr_capable(hdev)) {
692 		supported_phys |= MGMT_PHY_BR_1M_1SLOT;
693 
694 		if (hdev->features[0][0] & LMP_3SLOT)
695 			supported_phys |= MGMT_PHY_BR_1M_3SLOT;
696 
697 		if (hdev->features[0][0] & LMP_5SLOT)
698 			supported_phys |= MGMT_PHY_BR_1M_5SLOT;
699 
700 		if (lmp_edr_2m_capable(hdev)) {
701 			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
702 
703 			if (lmp_edr_3slot_capable(hdev))
704 				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
705 
706 			if (lmp_edr_5slot_capable(hdev))
707 				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
708 
709 			if (lmp_edr_3m_capable(hdev)) {
710 				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
711 
712 				if (lmp_edr_3slot_capable(hdev))
713 					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
714 
715 				if (lmp_edr_5slot_capable(hdev))
716 					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
717 			}
718 		}
719 	}
720 
721 	if (lmp_le_capable(hdev)) {
722 		supported_phys |= MGMT_PHY_LE_1M_TX;
723 		supported_phys |= MGMT_PHY_LE_1M_RX;
724 
725 		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
726 			supported_phys |= MGMT_PHY_LE_2M_TX;
727 			supported_phys |= MGMT_PHY_LE_2M_RX;
728 		}
729 
730 		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
731 			supported_phys |= MGMT_PHY_LE_CODED_TX;
732 			supported_phys |= MGMT_PHY_LE_CODED_RX;
733 		}
734 	}
735 
736 	return supported_phys;
737 }
738 
get_selected_phys(struct hci_dev * hdev)739 static u32 get_selected_phys(struct hci_dev *hdev)
740 {
741 	u32 selected_phys = 0;
742 
743 	if (lmp_bredr_capable(hdev)) {
744 		selected_phys |= MGMT_PHY_BR_1M_1SLOT;
745 
746 		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
747 			selected_phys |= MGMT_PHY_BR_1M_3SLOT;
748 
749 		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
750 			selected_phys |= MGMT_PHY_BR_1M_5SLOT;
751 
752 		if (lmp_edr_2m_capable(hdev)) {
753 			if (!(hdev->pkt_type & HCI_2DH1))
754 				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
755 
756 			if (lmp_edr_3slot_capable(hdev) &&
757 			    !(hdev->pkt_type & HCI_2DH3))
758 				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
759 
760 			if (lmp_edr_5slot_capable(hdev) &&
761 			    !(hdev->pkt_type & HCI_2DH5))
762 				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
763 
764 			if (lmp_edr_3m_capable(hdev)) {
765 				if (!(hdev->pkt_type & HCI_3DH1))
766 					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
767 
768 				if (lmp_edr_3slot_capable(hdev) &&
769 				    !(hdev->pkt_type & HCI_3DH3))
770 					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
771 
772 				if (lmp_edr_5slot_capable(hdev) &&
773 				    !(hdev->pkt_type & HCI_3DH5))
774 					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
775 			}
776 		}
777 	}
778 
779 	if (lmp_le_capable(hdev)) {
780 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
781 			selected_phys |= MGMT_PHY_LE_1M_TX;
782 
783 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
784 			selected_phys |= MGMT_PHY_LE_1M_RX;
785 
786 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
787 			selected_phys |= MGMT_PHY_LE_2M_TX;
788 
789 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
790 			selected_phys |= MGMT_PHY_LE_2M_RX;
791 
792 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
793 			selected_phys |= MGMT_PHY_LE_CODED_TX;
794 
795 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
796 			selected_phys |= MGMT_PHY_LE_CODED_RX;
797 	}
798 
799 	return selected_phys;
800 }
801 
get_configurable_phys(struct hci_dev * hdev)802 static u32 get_configurable_phys(struct hci_dev *hdev)
803 {
804 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
805 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
806 }
807 
get_supported_settings(struct hci_dev * hdev)808 static u32 get_supported_settings(struct hci_dev *hdev)
809 {
810 	u32 settings = 0;
811 
812 	settings |= MGMT_SETTING_POWERED;
813 	settings |= MGMT_SETTING_BONDABLE;
814 	settings |= MGMT_SETTING_DEBUG_KEYS;
815 	settings |= MGMT_SETTING_CONNECTABLE;
816 	settings |= MGMT_SETTING_DISCOVERABLE;
817 
818 	if (lmp_bredr_capable(hdev)) {
819 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
820 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
821 		settings |= MGMT_SETTING_BREDR;
822 		settings |= MGMT_SETTING_LINK_SECURITY;
823 
824 		if (lmp_ssp_capable(hdev)) {
825 			settings |= MGMT_SETTING_SSP;
826 		}
827 
828 		if (lmp_sc_capable(hdev))
829 			settings |= MGMT_SETTING_SECURE_CONN;
830 
831 		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
832 			     &hdev->quirks))
833 			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
834 	}
835 
836 	if (lmp_le_capable(hdev)) {
837 		settings |= MGMT_SETTING_LE;
838 		settings |= MGMT_SETTING_SECURE_CONN;
839 		settings |= MGMT_SETTING_PRIVACY;
840 		settings |= MGMT_SETTING_STATIC_ADDRESS;
841 		settings |= MGMT_SETTING_ADVERTISING;
842 	}
843 
844 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
845 	    hdev->set_bdaddr)
846 		settings |= MGMT_SETTING_CONFIGURATION;
847 
848 	if (cis_central_capable(hdev))
849 		settings |= MGMT_SETTING_CIS_CENTRAL;
850 
851 	if (cis_peripheral_capable(hdev))
852 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
853 
854 	settings |= MGMT_SETTING_PHY_CONFIGURATION;
855 
856 	return settings;
857 }
858 
get_current_settings(struct hci_dev * hdev)859 static u32 get_current_settings(struct hci_dev *hdev)
860 {
861 	u32 settings = 0;
862 
863 	if (hdev_is_powered(hdev))
864 		settings |= MGMT_SETTING_POWERED;
865 
866 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
867 		settings |= MGMT_SETTING_CONNECTABLE;
868 
869 	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
870 		settings |= MGMT_SETTING_FAST_CONNECTABLE;
871 
872 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
873 		settings |= MGMT_SETTING_DISCOVERABLE;
874 
875 	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
876 		settings |= MGMT_SETTING_BONDABLE;
877 
878 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
879 		settings |= MGMT_SETTING_BREDR;
880 
881 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
882 		settings |= MGMT_SETTING_LE;
883 
884 	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
885 		settings |= MGMT_SETTING_LINK_SECURITY;
886 
887 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
888 		settings |= MGMT_SETTING_SSP;
889 
890 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
891 		settings |= MGMT_SETTING_ADVERTISING;
892 
893 	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
894 		settings |= MGMT_SETTING_SECURE_CONN;
895 
896 	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
897 		settings |= MGMT_SETTING_DEBUG_KEYS;
898 
899 	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
900 		settings |= MGMT_SETTING_PRIVACY;
901 
902 	/* The current setting for static address has two purposes. The
903 	 * first is to indicate if the static address will be used and
904 	 * the second is to indicate if it is actually set.
905 	 *
906 	 * This means if the static address is not configured, this flag
907 	 * will never be set. If the address is configured, then if the
908 	 * address is actually used decides if the flag is set or not.
909 	 *
910 	 * For single mode LE only controllers and dual-mode controllers
911 	 * with BR/EDR disabled, the existence of the static address will
912 	 * be evaluated.
913 	 */
914 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
915 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
916 	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
917 		if (bacmp(&hdev->static_addr, BDADDR_ANY))
918 			settings |= MGMT_SETTING_STATIC_ADDRESS;
919 	}
920 
921 	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
922 		settings |= MGMT_SETTING_WIDEBAND_SPEECH;
923 
924 	if (cis_central_capable(hdev))
925 		settings |= MGMT_SETTING_CIS_CENTRAL;
926 
927 	if (cis_peripheral_capable(hdev))
928 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
929 
930 	if (bis_capable(hdev))
931 		settings |= MGMT_SETTING_ISO_BROADCASTER;
932 
933 	if (sync_recv_capable(hdev))
934 		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
935 
936 	return settings;
937 }
938 
/* Look up a pending mgmt command for this controller on the control
 * channel; returns NULL if none is queued for the opcode.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
943 
mgmt_get_adv_discov_flags(struct hci_dev * hdev)944 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
945 {
946 	struct mgmt_pending_cmd *cmd;
947 
948 	/* If there's a pending mgmt command the flags will not yet have
949 	 * their final values, so check for this first.
950 	 */
951 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
952 	if (cmd) {
953 		struct mgmt_mode *cp = cmd->param;
954 		if (cp->val == 0x01)
955 			return LE_AD_GENERAL;
956 		else if (cp->val == 0x02)
957 			return LE_AD_LIMITED;
958 	} else {
959 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
960 			return LE_AD_LIMITED;
961 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
962 			return LE_AD_GENERAL;
963 	}
964 
965 	return 0;
966 }
967 
mgmt_get_connectable(struct hci_dev * hdev)968 bool mgmt_get_connectable(struct hci_dev *hdev)
969 {
970 	struct mgmt_pending_cmd *cmd;
971 
972 	/* If there's a pending mgmt command the flag will not yet have
973 	 * it's final value, so check for this first.
974 	 */
975 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
976 	if (cmd) {
977 		struct mgmt_mode *cp = cmd->param;
978 
979 		return cp->val;
980 	}
981 
982 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
983 }
984 
/* hci_cmd_sync work: refresh the EIR data and class of device after the
 * service cache has been disabled. Always reports success.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
992 
/* Delayed work that expires the service cache: if HCI_SERVICE_CACHE was
 * still set, clear it and queue a sync update of EIR and device class.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	/* Nothing to do if the cache flag was already cleared elsewhere */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1003 
/* hci_cmd_sync work: re-enable advertising so a fresh RPA gets
 * programmed into the controller.
 */
static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}
1015 
/* Delayed work run when the Resolvable Private Address lifetime ends:
 * mark the RPA as expired and, if advertising is active, queue a
 * restart so a new address is generated.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Only a running advertiser needs an immediate address rotation */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1030 
static int set_discoverable_sync(struct hci_dev *hdev, void *data);

/* Delayed work run when the discoverable timeout expires: clear both
 * discoverable flags, push the new scan mode to the controller and
 * broadcast the settings change.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1057 
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

/* Finish a mesh transmit: emit the Mesh Packet Complete event for its
 * handle (unless @silent) and free the tracking entry.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	/* Copy the handle before mgmt_mesh_remove() frees mesh_tx */
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1071 
/* hci_cmd_sync work: stop the current mesh transmission window by
 * disabling advertising, and complete the next queued mesh TX entry
 * if one exists. Always reports success.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1085 
static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);

/* Completion callback chained after mesh_send_done_sync(): kick off
 * the next queued mesh transmission, or complete it immediately if
 * queuing fails. @err from the previous step is intentionally reused
 * for the new queue attempt's result.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1103 
/* Delayed work that ends a mesh transmission window once its duration
 * has elapsed; no-op if no transmission is in flight.
 */
static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
1114 
/* One-time switch of @hdev into mgmt-controlled mode: set up the
 * mgmt delayed work items and clear the implicit bondable default.
 * Idempotent - does nothing if HCI_MGMT is already set.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1136 
/* Handler for MGMT_OP_READ_INFO: fill a Read Info response with the
 * controller's address, version, settings, class and names, then
 * complete the command. Snapshot is taken under hdev->lock.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1166 
/* Build the EIR payload for extended info responses/events into @eir:
 * class of device (BR/EDR only), appearance (LE only), and the
 * complete and short local names. Returns the number of bytes written.
 *
 * NOTE(review): @eir has no explicit size parameter - callers must
 * provide a buffer large enough for the fields appended here.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1190 
/* Handler for MGMT_OP_READ_EXT_INFO: like read_controller_info() but
 * with class and names carried as EIR data. Also switches this socket
 * to extended-info events only.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1230 
/* Emit an Extended Controller Information Changed event (with fresh
 * EIR data) to all sockets that opted into extended-info events,
 * except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1246 
/* Complete @opcode on @sk with the current settings bitmask as the
 * response payload (the common success reply for Set-* commands).
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1254 
/* Broadcast an Advertising Added event for @instance, skipping the
 * originating socket @sk.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1263 
/* Broadcast an Advertising Removed event for @instance, skipping the
 * originating socket @sk.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1273 
/* Cancel a pending advertising-instance expiry and reset its timeout.
 * No-op when no instance timeout is armed.
 */
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
1281 
/* This function requires the caller holds hdev->lock */
/* Re-populate the pending LE connection/report lists from the stored
 * connection parameters, used after (re-)power-on.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1306 
/* Broadcast a New Settings event carrying the current settings bitmask
 * to all sockets registered for setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1314 
/* Completion callback for Set Powered: on success restore LE actions
 * (power-on only), reply with the new settings and broadcast them;
 * on failure report the translated status. @data is the pending cmd
 * queued by set_powered().
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1351 
/* hci_cmd_sync work for Set Powered: apply the requested power state.
 * Bails out with -ECANCELED if the pending command has gone away.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1367 
/* Handler for MGMT_OP_SET_POWERED: validate the request, reject
 * duplicates, short-circuit when already in the requested state, and
 * otherwise queue the power change with mgmt_set_powered_complete()
 * as the completion callback.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x00 (off) and 0x01 (on) are valid values */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just reply with the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1418 
/* Broadcast a New Settings event to all mgmt setting-event sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1423 
/* Shared context for mgmt_pending_foreach() callbacks. */
struct cmd_lookup {
	struct sock *sk;	/* first responder socket (held), or NULL */
	struct hci_dev *hdev;	/* device the commands belong to */
	u8 mgmt_status;		/* status to report to pending commands */
};
1429 
/* mgmt_pending_foreach() callback: answer each pending command with
 * the current settings and remember (and hold) the first socket seen
 * so the caller can skip it when broadcasting New Settings.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* Caller is responsible for the matching sock_put() */
		sock_hold(match->sk);
	}
}
1441 
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status pointed to by @data.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
}
1448 
/* mgmt_pending_foreach() callback: complete the pending command via
 * its cmd_complete handler when one is set, otherwise fall back to a
 * plain status reply. @data is a struct cmd_lookup.
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		return;
	}

	/* No dedicated handler: reply with match->mgmt_status only */
	cmd_status_rsp(cmd, data);
}
1465 
/* cmd_complete handler that echoes the command's full parameter blob
 * back as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1471 
/* cmd_complete handler that replies with only the leading
 * mgmt_addr_info portion of the command parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1477 
mgmt_bredr_support(struct hci_dev * hdev)1478 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1479 {
1480 	if (!lmp_bredr_capable(hdev))
1481 		return MGMT_STATUS_NOT_SUPPORTED;
1482 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1483 		return MGMT_STATUS_REJECTED;
1484 	else
1485 		return MGMT_STATUS_SUCCESS;
1486 }
1487 
mgmt_le_support(struct hci_dev * hdev)1488 static u8 mgmt_le_support(struct hci_dev *hdev)
1489 {
1490 	if (!lmp_le_capable(hdev))
1491 		return MGMT_STATUS_NOT_SUPPORTED;
1492 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1493 		return MGMT_STATUS_REJECTED;
1494 	else
1495 		return MGMT_STATUS_SUCCESS;
1496 }
1497 
/* Completion callback for Set Discoverable: on failure report the
 * error and clear limited discoverable; on success arm the
 * discoverable timeout (if any), reply with the new settings and
 * broadcast them. @data is the pending cmd queued by
 * set_discoverable().
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arming of the timeout is deferred to here, after the mode
	 * change has actually taken effect on the controller.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1532 
/* hci_cmd_sync work for Set Discoverable: push the already-updated
 * discoverable flags to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1539 
/* Handler for MGMT_OP_SET_DISCOVERABLE: validate mode (0x00 off,
 * 0x01 general, 0x02 limited) and timeout, handle the powered-off and
 * timeout-only-change fast paths, otherwise update the flags and queue
 * the controller update with mgmt_set_discoverable_complete() as the
 * completion callback.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Discoverable requires at least one of LE or BR/EDR enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1672 
mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1673 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1674 					  int err)
1675 {
1676 	struct mgmt_pending_cmd *cmd = data;
1677 
1678 	bt_dev_dbg(hdev, "err %d", err);
1679 
1680 	/* Make sure cmd still outstanding. */
1681 	if (err == -ECANCELED ||
1682 	    cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1683 		return;
1684 
1685 	hci_dev_lock(hdev);
1686 
1687 	if (err) {
1688 		u8 mgmt_err = mgmt_status(err);
1689 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
1690 		goto done;
1691 	}
1692 
1693 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1694 	new_settings(hdev, cmd->sk);
1695 
1696 done:
1697 	if (cmd)
1698 		mgmt_pending_remove(cmd);
1699 
1700 	hci_dev_unlock(hdev);
1701 }
1702 
/* Powered-off path for Set Connectable: flip the flags only (no HCI
 * traffic), reply with the settings and broadcast New Settings when
 * anything changed. Returns 0 or a negative error.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also clears discoverable */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1731 
/* hci_cmd_sync work for Set Connectable: push the already-updated
 * connectable flags to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1738 
/* Handler for MGMT_OP_SET_CONNECTABLE: validate the request, take the
 * flag-only path when powered off, reject while a conflicting command
 * is pending, then update the flags and queue the controller update
 * with mgmt_set_connectable_complete() as the completion callback.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Connectable requires at least one of LE or BR/EDR enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also ends discoverable mode and
		 * its timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1798 
/* Handler for MGMT_OP_SET_BONDABLE: toggle HCI_BONDABLE, reply with
 * the settings and, when the flag actually changed, refresh
 * discoverability (advertising address may depend on it) and
 * broadcast New Settings.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1836 
/* Handler for MGMT_OP_SET_LINK_SECURITY: BR/EDR only. When powered
 * off, just flip the HCI_LINK_SECURITY flag; when powered, send
 * HCI_OP_WRITE_AUTH_ENABLE and let the command-complete path answer
 * via the pending command.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested auth setting */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* On success the reply comes from the HCI event handler via the
	 * pending command added above.
	 */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1905 
/* Completion callback for Set SSP: on failure roll back the
 * HCI_SSP_ENABLED flag and fail all pending SET_SSP commands; on
 * success sync the flag with the requested value, answer all pending
 * commands with the settings, broadcast on change and refresh EIR.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Undo the optimistic flag set done in set_ssp_sync() */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1947 
/* hci_cmd_sync work for Set SSP: optimistically set HCI_SSP_ENABLED
 * (when enabling), write the SSP mode to the controller, and undo the
 * flag if the write failed. Returns the write's result.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* Roll back the flag if we set it but the controller write failed */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1965 
/* Handler for MGMT_OP_SET_SSP: requires BR/EDR and an SSP-capable
 * controller. When powered off only the HCI_SSP_ENABLED flag is
 * toggled; otherwise the mode change is queued with
 * set_ssp_complete() as the completion callback.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just reply with the settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2040 
/* MGMT_OP_SET_HS handler: High Speed is unconditionally rejected as not
 * supported.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	bt_dev_dbg(hdev, "sock %p", sk);

	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);
}
2048 
/* Completion handler for MGMT_OP_SET_LE: answer all pending SET_LE
 * commands and, on success, emit New Settings.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		/* Fail every pending SET_LE command with the error */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, cmd_status_rsp,
				     &status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last responding socket */
	if (match.sk)
		sock_put(match.sk);
}
2069 
/* hci_cmd_sync work for MGMT_OP_SET_LE: write the LE host support value
 * and, when LE remains enabled, refresh default advertising state.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Disabling LE: tear down all advertising first */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			/* Scan response data only after the extended adv
			 * instance has been (re)configured successfully.
			 */
			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2113 
/* Completion handler for MGMT_OP_SET_MESH_RECEIVER: on error fail all
 * pending commands of this opcode, otherwise complete the originating
 * command with success.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
				     cmd_status_rsp, &status);
		return;
	}

	/* Remove before replying so the command is no longer pending */
	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2129 
/* hci_cmd_sync work for MGMT_OP_SET_MESH_RECEIVER: toggle the HCI_MESH
 * flag, install the caller-supplied AD-type filter list and re-evaluate
 * passive scanning.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	/* Remaining bytes after the fixed header are the AD-type filters */
	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2152 
/* MGMT_OP_SET_MESH_RECEIVER handler: validate the request and queue
 * set_mesh_sync. Requires LE-capable hardware with the mesh experimental
 * feature enabled.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2190 
/* Completion handler for mesh_send_sync: on error abort the transmission,
 * otherwise schedule the mesh-send-done work after the transmission
 * window (25 ms per requested advertising event).
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2211 
/* hci_cmd_sync work for MGMT_OP_MESH_SEND: create a dedicated advertising
 * instance carrying the mesh payload and schedule it.
 *
 * The instance number used is one beyond the controller's LE advertising
 * sets, i.e. outside the range used by regular advertising instances.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* All regular instances in use: cannot add the mesh instance.
	 * NOTE(review): this returns a positive MGMT status rather than a
	 * negative errno — confirm mesh_send_start_complete() handles it
	 * as a failure.
	 */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	/* A non-zero instance here means it still needs to be scheduled */
	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2265 
send_count(struct mgmt_mesh_tx * mesh_tx,void * data)2266 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2267 {
2268 	struct mgmt_rp_mesh_read_features *rp = data;
2269 
2270 	if (rp->used_handles >= rp->max_handles)
2271 		return;
2272 
2273 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2274 }
2275 
/* MGMT_OP_MESH_READ_FEATURES handler: report the maximum number of
 * concurrent mesh transmissions and the handles currently in use by the
 * requesting socket.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* max_handles stays 0 (nothing available) while LE is disabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply to only the used handle slots; the arithmetic
	 * assumes one byte per handle in rp.handles[MESH_HANDLES_MAX].
	 */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2302 
/* hci_cmd_sync work for MGMT_OP_MESH_SEND_CANCEL: abort either all of
 * the requesting socket's outstanding mesh transmissions (handle 0) or
 * the single matching transmission owned by that socket.
 */
static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		/* Handle 0: cancel every transmission owned by this socket */
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		/* Only the owning socket may cancel a transmission */
		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}
2329 
/* MGMT_OP_MESH_SEND_CANCEL handler: validate the request and queue
 * send_cancel as hci_cmd_sync work.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	/* mgmt_pending_new: not added to the pending list; send_cancel
	 * frees it itself.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2363 
/* MGMT_OP_MESH_SEND handler: validate and register a mesh transmission
 * and, when no transmission is already in flight, queue mesh_send_sync
 * to start advertising it. On success the assigned handle is returned to
 * the caller.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* Payload must be non-empty and fit in a legacy adv PDU (31 bytes) */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Count this socket's outstanding transmissions to enforce the
	 * MESH_HANDLES_MAX limit.
	 */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		/* Nothing in flight yet: start transmitting immediately;
		 * otherwise the entry waits its turn in the mesh tx list.
		 */
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2424 
/* MGMT_OP_SET_LE handler: enable or disable Low Energy support.
 *
 * LE-only configurations may not switch LE off. When the adapter is
 * powered down (or the host LE setting already matches) only the flags
 * are toggled; otherwise set_le_sync is queued.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		/* No controller interaction needed: just toggle flags */
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE implies disabling LE advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Advertising changes also touch LE state, so serialize with it */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2513 
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* Scan the pending list for opcodes that may touch the Class of
	 * Device or the EIR data.
	 */
	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}
2536 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs differ from it only in
 * bytes 12-15.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2541 
get_uuid_size(const u8 * uuid)2542 static u8 get_uuid_size(const u8 *uuid)
2543 {
2544 	u32 val;
2545 
2546 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2547 		return 128;
2548 
2549 	val = get_unaligned_le32(&uuid[12]);
2550 	if (val > 0xffff)
2551 		return 32;
2552 
2553 	return 16;
2554 }
2555 
/* Shared completion handler for the UUID/class commands: reply with the
 * current 3-byte device class and release the pending command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2567 
/* hci_cmd_sync work for MGMT_OP_ADD_UUID: push the updated Class of
 * Device to the controller, then refresh the EIR data.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2578 
/* MGMT_OP_ADD_UUID handler: append a UUID to the device's service list
 * and schedule a Class of Device / EIR refresh.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	/* Success path falls through with err == 0 */
failed:
	hci_dev_unlock(hdev);
	return err;
}
2628 
enable_service_cache(struct hci_dev * hdev)2629 static bool enable_service_cache(struct hci_dev *hdev)
2630 {
2631 	if (!hdev_is_powered(hdev))
2632 		return false;
2633 
2634 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2635 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2636 				   CACHE_TIMEOUT);
2637 		return true;
2638 	}
2639 
2640 	return false;
2641 }
2642 
/* hci_cmd_sync work for MGMT_OP_REMOVE_UUID: push the updated Class of
 * Device to the controller, then refresh the EIR data.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2653 
/* MGMT_OP_REMOVE_UUID handler: remove one UUID (or all, when the
 * all-zero UUID is given) from the service list and schedule a Class of
 * Device / EIR refresh.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* When the service cache was armed, defer the actual
		 * CoD/EIR update to its expiry and reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2724 
/* hci_cmd_sync work for MGMT_OP_SET_DEV_CLASS: flush a pending service
 * cache (refreshing EIR) and write the new Class of Device.
 */
static int set_class_sync(struct hci_dev *hdev, void *data)
{
	int err = 0;

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Cancel the deferred cache expiry and update EIR now */
		cancel_delayed_work_sync(&hdev->service_cache);
		err = hci_update_eir_sync(hdev);
	}

	if (err)
		return err;

	return hci_update_class_sync(hdev);
}
2739 
/* MGMT_OP_SET_DEV_CLASS handler: store the major/minor device class and,
 * on a powered adapter, schedule set_class_sync to program it.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reserved bits: low two of minor, high three of major must be 0 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Not powered: value is recorded and applied on power on */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2794 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied list, skipping blocked, malformed and debug keys,
 * and update the keep-debug-keys policy.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps struct_size() below U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	hci_dev_lock(hdev);

	/* Loading is a full replace: drop all previously stored keys */
	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Skip keys the administrator has blocked */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Link keys are a BR/EDR concept; reject other addr types */
		if (key->addr.type != BDADDR_BREDR) {
			bt_dev_warn(hdev,
				    "Invalid link address type %u for %pMR",
				    key->addr.type, &key->addr.bdaddr);
			continue;
		}

		if (key->type > 0x08) {
			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
				    key->type, &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2887 
/* Emit a Device Unpaired event for the given address to all management
 * sockets except @skip_sk.
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired evt;

	bacpy(&evt.addr.bdaddr, bdaddr);
	evt.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &evt, sizeof(evt),
			  skip_sk);
}
2899 
/* Completion handler for unpair_device_sync: on success broadcast the
 * Device Unpaired event, then complete and free the pending command.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	/* cmd_complete was set to addr_cmd_complete by unpair_device() */
	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
2911 
/* hci_cmd_sync work for MGMT_OP_UNPAIR_DEVICE: if the device is still
 * connected, terminate the link.
 */
static int unpair_device_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct hci_conn *conn;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* Already disconnected: nothing left to do */
	if (!conn)
		return 0;

	/* Disregard any possible error since the likes of hci_abort_conn_sync
	 * will clean up the connection no matter the error.
	 */
	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

	return 0;
}
2935 
/* MGMT_OP_UNPAIR_DEVICE handler: remove all pairing material (link key,
 * or SMP LTK/IRK) for the given address and optionally disconnect the
 * device. When a disconnect is needed it is queued as hci_cmd_sync work
 * and the command completes from unpair_device_complete().
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop the connection parameters right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3064 
disconnect_complete(struct hci_dev * hdev,void * data,int err)3065 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3066 {
3067 	struct mgmt_pending_cmd *cmd = data;
3068 
3069 	cmd->cmd_complete(cmd, mgmt_status(err));
3070 	mgmt_pending_free(cmd);
3071 }
3072 
disconnect_sync(struct hci_dev * hdev,void * data)3073 static int disconnect_sync(struct hci_dev *hdev, void *data)
3074 {
3075 	struct mgmt_pending_cmd *cmd = data;
3076 	struct mgmt_cp_disconnect *cp = cmd->param;
3077 	struct hci_conn *conn;
3078 
3079 	if (cp->addr.type == BDADDR_BREDR)
3080 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3081 					       &cp->addr.bdaddr);
3082 	else
3083 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3084 					       le_addr_type(cp->addr.type));
3085 
3086 	if (!conn)
3087 		return -ENOTCONN;
3088 
3089 	/* Disregard any possible error since the likes of hci_abort_conn_sync
3090 	 * will clean up the connection no matter the error.
3091 	 */
3092 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3093 
3094 	return 0;
3095 }
3096 
/* MGMT_OP_DISCONNECT handler: validate the target address, make sure
 * the adapter is up and then queue disconnect_sync() to terminate the
 * link.  The command response is sent from disconnect_complete().
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the requested address back in the response. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Allocated with mgmt_pending_new(); owned by the sync machinery
	 * once queued and freed from disconnect_complete(), or below if
	 * queueing fails.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
				 disconnect_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3142 
/* Map a HCI link type plus address type to the corresponding mgmt
 * BDADDR_* address type.  Anything that is not an LE/ISO link is
 * reported as BR/EDR.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	if (link_type != ISO_LINK && link_type != LE_LINK)
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;

	/* Anything other than a public LE address falls back to the
	 * LE random address type.
	 */
	return (addr_type == ADDR_LE_DEV_PUBLIC) ? BDADDR_LE_PUBLIC :
						   BDADDR_LE_RANDOM;
}
3162 
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible connected devices.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count the connections to size the response. */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the entries.  SCO/eSCO links are skipped
	 * *after* the copy, so their slot is simply overwritten by the
	 * next entry and the final count i never exceeds the first pass.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3216 
send_pin_code_neg_reply(struct sock * sk,struct hci_dev * hdev,struct mgmt_cp_pin_code_neg_reply * cp)3217 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3218 				   struct mgmt_cp_pin_code_neg_reply *cp)
3219 {
3220 	struct mgmt_pending_cmd *cmd;
3221 	int err;
3222 
3223 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3224 			       sizeof(*cp));
3225 	if (!cmd)
3226 		return -ENOMEM;
3227 
3228 	cmd->cmd_complete = addr_cmd_complete;
3229 
3230 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3231 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3232 	if (err < 0)
3233 		mgmt_pending_remove(cmd);
3234 
3235 	return err;
3236 }
3237 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for the BR/EDR connection with the given address.  A
 * too-short PIN for a high-security pairing is converted into a
 * negative reply instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; anything shorter is
	 * rejected by sending a negative reply to the controller and an
	 * INVALID_PARAMS status back to the caller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	/* Pending entry is completed when the controller answers; removed
	 * below only if sending the HCI command fails outright.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3299 
/* MGMT_OP_SET_IO_CAPABILITY handler: record the requested IO
 * capability on the adapter after range-checking it.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject values beyond the highest defined SMP IO capability. */
	if (req->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = req->io_capability;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
3322 
find_pairing(struct hci_conn * conn)3323 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3324 {
3325 	struct hci_dev *hdev = conn->hdev;
3326 	struct mgmt_pending_cmd *cmd;
3327 
3328 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3329 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3330 			continue;
3331 
3332 		if (cmd->user_data != conn)
3333 			continue;
3334 
3335 		return cmd;
3336 	}
3337 
3338 	return NULL;
3339 }
3340 
/* Finalize a MGMT_OP_PAIR_DEVICE command: send the response to the
 * caller, detach the pairing callbacks and release the references the
 * command held on the connection.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drop the hold taken when the connection was initiated; the
	 * hci_conn_put() below then releases the extra reference taken
	 * via hci_conn_get() in pair_device().
	 */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
3369 
/* Notification from SMP that pairing on @conn ended; complete and
 * remove the matching pending MGMT_OP_PAIR_DEVICE command, if any.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *pending = find_pairing(conn);

	if (!pending)
		return;

	pending->cmd_complete(pending, complete ? MGMT_STATUS_SUCCESS :
						  MGMT_STATUS_FAILED);
	mgmt_pending_remove(pending);
}
3381 
/* Connection callback used for BR/EDR pairing: complete the matching
 * pending MGMT_OP_PAIR_DEVICE command with the translated status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *pending;

	BT_DBG("status %u", status);

	pending = find_pairing(conn);
	if (pending) {
		pending->cmd_complete(pending, mgmt_status(status));
		mgmt_pending_remove(pending);
	} else {
		BT_DBG("Unable to find a pending command");
	}
}
3397 
/* Connection callback used for LE pairing.  A zero status here only
 * means the link came up, not that pairing finished, so success is
 * ignored and only failures complete the pending command.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *pending;

	BT_DBG("status %u", status);

	if (!status)
		return;

	pending = find_pairing(conn);
	if (pending) {
		pending->cmd_complete(pending, mgmt_status(status));
		mgmt_pending_remove(pending);
	} else {
		BT_DBG("Unable to find a pending command");
	}
}
3416 
/* MGMT_OP_PAIR_DEVICE handler: initiate a connection to the given
 * address (ACL for BR/EDR, scan-and-connect for LE) and attach pairing
 * callbacks.  The command completes from those callbacks, or
 * immediately below when the link is already up and secure.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the target address back in the response. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	/* Map connect errors onto the closest mgmt status. */
	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A connect callback already set means another pairing is using
	 * this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Extra reference released in pairing_complete(). */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure enough, complete now
	 * instead of waiting for a callback.
	 */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3551 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the in-flight
 * MGMT_OP_PAIR_DEVICE for the given address, remove any keys created
 * so far and tear down the link if pairing created it.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same device the pending pairing is
	 * for, otherwise it is rejected.
	 */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Completes the PAIR_DEVICE command (via pairing_complete) with
	 * CANCELLED before the CANCEL_PAIR_DEVICE response is sent.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3608 
/* Common implementation for the user pairing response commands
 * (PIN/confirm/passkey, positive and negative).  LE responses are
 * routed through SMP and answered immediately; BR/EDR responses are
 * forwarded to the controller as HCI command @hci_op and tracked as a
 * pending command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go to SMP, not the controller, and the
	 * command can be completed right away.
	 */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3679 
/* MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper around user_pairing_resp(). */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &req->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3691 
/* MGMT_OP_USER_CONFIRM_REPLY: validate the fixed-size payload and hand
 * off to user_pairing_resp().
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*req))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &req->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3707 
/* MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper around user_pairing_resp(). */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &req->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3719 
/* MGMT_OP_USER_PASSKEY_REPLY: forward the user's passkey via
 * user_pairing_resp().
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &req->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, req->passkey);
}
3731 
/* MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper around user_pairing_resp(). */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &req->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3743 
/* If the current advertising instance carries any of @flags, cancel
 * its timeout and move on to the next instance (when one exists).
 * Used when data referenced by an instance (name, appearance) changes.
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* Nothing to do without a current instance, or when the current
	 * instance doesn't use any of the affected flags.
	 */
	if (!adv || !(adv->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv = hci_get_next_instance(hdev, adv->instance);
	if (adv)
		hci_schedule_adv_instance_sync(hdev, adv->instance, true);

	return 0;
}
3766 
/* Sync work queued after a local name change: expire any advertising
 * instance that embeds the local name.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3771 
/* Completion callback for set_name_sync(): send the final response for
 * MGMT_OP_SET_LOCAL_NAME and, on success, refresh advertising that
 * carries the local name.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the operation was cancelled or if this command is
	 * no longer the tracked pending SET_LOCAL_NAME (already
	 * completed or replaced elsewhere).
	 */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3797 
/* Sync work for MGMT_OP_SET_LOCAL_NAME: push the new name to the
 * controller (BR/EDR name + EIR, and LE scan response when
 * advertising).
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3813 
/* MGMT_OP_SET_LOCAL_NAME handler: store the short name immediately;
 * the full name is either stored directly (adapter off) or committed
 * after set_name_sync() has been queued successfully.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: no controller interaction needed; store the name,
	 * answer and emit the name-changed event right away.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Commit the new name only once the sync work is queued. */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3876 
/* Sync work queued after an appearance change: expire any advertising
 * instance that embeds the appearance value.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3881 
/* MGMT_OP_SET_APPEARANCE handler: store the new LE appearance value
 * and, when it actually changed, refresh affected advertising and
 * notify extended-info listeners.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *req = data;
	u16 new_appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	new_appearance = le16_to_cpu(req->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != new_appearance) {
		hdev->appearance = new_appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3916 
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report supported, selected
 * and configurable PHYs for this adapter.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rsp;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rsp, 0, sizeof(rsp));

	hci_dev_lock(hdev);
	rsp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rsp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rsp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rsp, sizeof(rsp));
}
3937 
mgmt_phy_configuration_changed(struct hci_dev * hdev,struct sock * skip)3938 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3939 {
3940 	struct mgmt_ev_phy_configuration_changed ev;
3941 
3942 	memset(&ev, 0, sizeof(ev));
3943 
3944 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3945 
3946 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3947 			  sizeof(ev), skip);
3948 }
3949 
/* Completion callback for set_default_phy_sync(): derive the final
 * status from both the sync result and the HCI command response, send
 * the MGMT response and emit the changed event on success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if cancelled or if this is no longer the tracked
	 * pending SET_PHY_CONFIGURATION command.
	 */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* The sync op succeeded; the real outcome is in the skb the HCI
	 * command returned (missing skb, error skb, or the status byte
	 * in its payload).
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3987 
set_default_phy_sync(struct hci_dev * hdev,void * data)3988 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3989 {
3990 	struct mgmt_pending_cmd *cmd = data;
3991 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3992 	struct hci_cp_le_set_default_phy cp_phy;
3993 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3994 
3995 	memset(&cp_phy, 0, sizeof(cp_phy));
3996 
3997 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3998 		cp_phy.all_phys |= 0x01;
3999 
4000 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4001 		cp_phy.all_phys |= 0x02;
4002 
4003 	if (selected_phys & MGMT_PHY_LE_1M_TX)
4004 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4005 
4006 	if (selected_phys & MGMT_PHY_LE_2M_TX)
4007 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4008 
4009 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
4010 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4011 
4012 	if (selected_phys & MGMT_PHY_LE_1M_RX)
4013 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4014 
4015 	if (selected_phys & MGMT_PHY_LE_2M_RX)
4016 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4017 
4018 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
4019 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4020 
4021 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4022 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4023 
4024 	return 0;
4025 }
4026 
/* Handler for MGMT_OP_SET_PHY_CONFIGURATION.
 *
 * Validates the requested PHY selection against what the controller
 * supports and what is actually configurable, applies the BR/EDR part
 * immediately by adjusting hdev->pkt_type, and queues an HCI LE Set
 * Default PHY command (set_default_phy_sync) when the LE part changed.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any bit outside of what the controller supports. */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* PHYs that are supported but not configurable must always stay
	 * selected; a request that tries to clear one of them is invalid.
	 */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do when the requested selection already matches. */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Only one Set PHY Configuration operation may be in flight. */
	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate the BR/EDR selection into ACL packet types. Note the
	 * inverted polarity of the EDR (2M/3M) bits below: setting
	 * HCI_2DHx/HCI_3DHx in pkt_type disallows that packet type,
	 * whereas the basic-rate DH/DM bits enable theirs.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part is unchanged, the BR/EDR update above is all
	 * there is to do; no HCI command needs to be queued.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4155 
/* Handler for MGMT_OP_SET_BLOCKED_KEYS.
 *
 * Replaces the device's blocked-key list with the keys supplied in the
 * command. Note that @err carries an MGMT status code (not an errno)
 * since it is passed straight to mgmt_cmd_complete() as the status.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Largest key_count whose total payload still fits in a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the declared key count. */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Old list is dropped first; the new list replaces it entirely. */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			/* Keys added before the failure stay on the list;
			 * the caller is told via NO_RESOURCES.
			 */
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
4204 
/* Handler for MGMT_OP_SET_WIDEBAND_SPEECH.
 *
 * Toggles the HCI_WIDEBAND_SPEECH_ENABLED flag. The setting can only be
 * changed while the controller is powered off; changing it on a powered
 * controller is rejected.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Support is driver-advertised via a quirk bit. */
	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Reject attempts to flip the setting while powered on. */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Only broadcast New Settings when the flag actually flipped. */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4253 
/* Handler for MGMT_OP_READ_CONTROLLER_CAP.
 *
 * Builds a TLV list of controller capabilities (security flags, max
 * encryption key sizes, LE TX power range) into a small stack buffer.
 * buf is sized so the fixed header plus the largest possible set of
 * capability entries assembled below always fits.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		/* Only the first byte of each power value is copied;
		 * assumes min/max_le_tx_power fit in a single octet each
		 * (dBm values) — TODO confirm against hci_dev definition.
		 */
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4320 
/* UUIDs identifying the experimental features below. Note that the
 * byte arrays store the UUID in reversed (little-endian) byte order
 * relative to the textual form given in each comment.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4364 
/* Handler for MGMT_OP_READ_EXP_FEATURES_INFO.
 *
 * Works both with a controller index (hdev set) and on the
 * non-controller index (hdev == NULL); each experimental feature is
 * reported only where it applies. Reading the list also arms
 * MGMT_EV_EXP_FEATURE_CHANGED delivery for this socket.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features: this matches the maximum number of
	 * conditional entries appended below.
	 */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature: only on the non-controller index. */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	/* BIT(0) in flags means "feature currently enabled". */
	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* BIT(1): toggling this feature changes supported settings */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Quality report via AOSP extension or a vendor-specific hook. */
	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* ISO socket support is global, so it is listed on every index. */
	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* Each reported feature is 20 bytes: 16 of UUID + 4 of flags. */
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}
4469 
/* Emit MGMT_EV_EXP_FEATURE_CHANGED for the LL privacy (RPA resolution)
 * experimental feature and keep hdev->conn_flags in sync: the
 * DEVICE_PRIVACY connection flag is only offered while the feature is
 * enabled on a controller capable of privacy mode.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = BIT(1);	/* supported settings changed */

	if (enabled)
		flags |= BIT(0);	/* feature enabled */

	/* NOTE(review): the conn_flags update is not atomic with respect
	 * to readers; the original code carried the same open question.
	 */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, &ev,
				  sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS,
				  skip);
}
4490 
/* Broadcast MGMT_EV_EXP_FEATURE_CHANGED for @uuid to every socket that
 * has experimental feature events enabled, except @skip.
 */
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	/* Zero the whole event first so no stale/padding bytes leak out. */
	memset(&ev, 0, sizeof(ev));
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
	memcpy(ev.uuid, uuid, 16);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, &ev,
				  sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS,
				  skip);
}
4504 
/* Initializer for an exp_features[] table entry: maps an experimental
 * feature UUID to its set handler.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4510 
4511 /* The zero key uuid is special. Multiple exp features are set through it. */
/* Setting the all-zeroes UUID disables active experimental features:
 * the debug feature (non-controller index) and LL privacy (controller
 * index, only while the controller is powered off). The response always
 * carries a zeroed UUID and cleared flags.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* LL privacy can only be turned off while powered down. */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4547 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Toggle the global Bluetooth debug feature (bt_dbg). The feature is
 * not tied to a controller, so the command must be sent on the
 * non-controller index.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here (checked above), so the change event goes out
	 * on the non-controller index.
	 */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4594 
/* Toggle the experimental MGMT mesh feature (HCI_MESH_EXPERIMENTAL)
 * for a specific controller. Disabling it also clears the active
 * HCI_MESH flag.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		/* Turning the experiment off also disables active mesh. */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}
4645 
/* Toggle the experimental LL privacy (RPA resolution) feature
 * (HCI_ENABLE_LL_PRIVACY). Changes are only allowed while the
 * controller is powered off.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Dedicated notifier: also updates hdev->conn_flags. */
	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4710 
/* Toggle the experimental quality report feature. Prefers a driver's
 * set_quality_report hook and falls back to the AOSP vendor extension.
 * Runs under the HCI request sync lock since the vendor calls talk to
 * the controller.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Driver hook takes precedence over the AOSP extension. */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only track the flag once the controller accepted it. */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4784 
/* Toggle the experimental offload codecs feature
 * (HCI_OFFLOAD_CODECS_ENABLED). Requires a driver that provides the
 * get_data_path_id hook.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	/* Offloading is only possible when the driver can report data
	 * path IDs.
	 */
	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4842 
/* Toggle the experimental LE simultaneous central/peripheral roles
 * feature (HCI_LE_SIMULTANEOUS_ROLES). Only available when the
 * controller's LE states allow simultaneous roles.
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4900 
#ifdef CONFIG_BT_LE
/* Toggle the global ISO socket experimental feature by registering or
 * unregistering the ISO socket layer (iso_init/iso_exit). This is a
 * global feature, so it must be sent on the non-controller index.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = cp->param[0] ? true : false;
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	/* Only report a change when init/exit actually succeeded. */
	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here (checked above): event on non-controller index */
	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif
4951 
/* Dispatch table for MGMT_OP_SET_EXP_FEATURE: maps each experimental
 * feature UUID to its set handler. The table is terminated by an entry
 * with a NULL uuid.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4973 
/* Handler for MGMT_OP_SET_EXP_FEATURE: dispatch to the handler whose
 * UUID matches the request, or fail with NOT_SUPPORTED when the UUID
 * is unknown.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	const struct mgmt_exp_feature *feat;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (feat = exp_features; feat->uuid; feat++) {
		if (memcmp(cp->uuid, feat->uuid, 16))
			continue;

		return feat->set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4991 
get_params_flags(struct hci_dev * hdev,struct hci_conn_params * params)4992 static u32 get_params_flags(struct hci_dev *hdev,
4993 			    struct hci_conn_params *params)
4994 {
4995 	u32 flags = hdev->conn_flags;
4996 
4997 	/* Devices using RPAs can only be programmed in the acceptlist if
4998 	 * LL Privacy has been enable otherwise they cannot mark
4999 	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5000 	 */
5001 	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5002 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5003 		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5004 
5005 	return flags;
5006 }
5007 
/* MGMT_OP_GET_DEVICE_FLAGS handler: report the supported and current
 * connection flags of a BR/EDR acceptlist entry or of an LE connection
 * parameters entry.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	/* bt_dev_dbg() already appends a newline; a trailing "\n" in the
	 * format would produce a blank line in the log.
	 */
	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		/* The per-params supported set may be narrower than
		 * hdev->conn_flags (see get_params_flags()).
		 */
		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
5059 
/* Emit MGMT_EV_DEVICE_FLAGS_CHANGED to all mgmt listeners except @sk. */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev = {
		.addr.type = bdaddr_type,
		.supported_flags = cpu_to_le32(supported_flags),
		.current_flags = cpu_to_le32(current_flags),
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
5073 
/* MGMT_OP_SET_DEVICE_FLAGS handler: update the connection flags of a
 * BR/EDR acceptlist entry or of an LE connection parameters entry, and
 * emit Device Flags Changed on success.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): read before hci_dev_lock() below - conn_flags could
	 * change in between; consider taking the lock earlier.
	 */
	supported_flags = hdev->conn_flags;

	/* Reject any requested flag outside the supported set. */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	/* LE path: flags live on the connection parameters entry. */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* Re-check against the per-params supported set, which may be
	 * narrower than hdev->conn_flags (see get_params_flags()).
	 */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	/* WRITE_ONCE suggests lockless readers of params->flags elsewhere -
	 * presumably paired with READ_ONCE; confirm before relying on it.
	 */
	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5150 
/* Emit MGMT_EV_ADV_MONITOR_ADDED for @handle to listeners except @sk. */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
5160 
/* Emit MGMT_EV_ADV_MONITOR_REMOVED; @handle is already little-endian. */
static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
				     __le16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev = {
		.monitor_handle = handle,
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
}
5170 
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported/enabled
 * monitor features plus the handles of all currently tracked monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* OR-patterns monitoring is only offered when the controller has
	 * MSFT extension support.
	 */
	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Snapshot the handles of all registered monitors under the lock. */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	/* Variable-length reply: fixed header plus one u16 per handle. */
	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5219 
/* Completion callback for mgmt_add_adv_patterns_monitor_sync(): report
 * the registration result to userspace and finalize the monitor state.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		/* Announce the new monitor and refresh passive scanning
		 * so it takes the monitor into account.
		 */
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5247 
mgmt_add_adv_patterns_monitor_sync(struct hci_dev * hdev,void * data)5248 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5249 {
5250 	struct mgmt_pending_cmd *cmd = data;
5251 	struct adv_monitor *monitor = cmd->user_data;
5252 
5253 	return hci_add_adv_monitor(hdev, monitor);
5254 }
5255 
/* Common tail of the ADD_ADV_PATTERNS_MONITOR[_RSSI] handlers: queue the
 * monitor registration on the cmd_sync workqueue, or fail the request.
 * Takes ownership of @m and frees it on every error path.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	/* Caller already failed (parse/alloc error): just report it. */
	if (status)
		goto unlock;

	/* Only one LE toggle or monitor addition may be in flight. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		/* NOTE(review): cmd stays on the pending list here even
		 * though the completion will never run - verify whether a
		 * mgmt_pending_remove() is missing on this path.
		 */
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	/* Error path owns the monitor: release it before replying. */
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5302 
parse_adv_monitor_rssi(struct adv_monitor * m,struct mgmt_adv_rssi_thresholds * rssi)5303 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5304 				   struct mgmt_adv_rssi_thresholds *rssi)
5305 {
5306 	if (rssi) {
5307 		m->rssi.low_threshold = rssi->low_threshold;
5308 		m->rssi.low_threshold_timeout =
5309 		    __le16_to_cpu(rssi->low_threshold_timeout);
5310 		m->rssi.high_threshold = rssi->high_threshold;
5311 		m->rssi.high_threshold_timeout =
5312 		    __le16_to_cpu(rssi->high_threshold_timeout);
5313 		m->rssi.sampling_period = rssi->sampling_period;
5314 	} else {
5315 		/* Default values. These numbers are the least constricting
5316 		 * parameters for MSFT API to work, so it behaves as if there
5317 		 * are no rssi parameter to consider. May need to be changed
5318 		 * if other API are to be supported.
5319 		 */
5320 		m->rssi.low_threshold = -127;
5321 		m->rssi.low_threshold_timeout = 60;
5322 		m->rssi.high_threshold = -127;
5323 		m->rssi.high_threshold_timeout = 0;
5324 		m->rssi.sampling_period = 0;
5325 	}
5326 }
5327 
/* Validate @pattern_count patterns and link a copy of each onto
 * m->patterns. Returns an MGMT status code; on failure, patterns
 * already linked stay on the list and are freed with the monitor.
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	int i;

	for (i = 0; i < pattern_count; i++) {
		const struct mgmt_adv_pattern *src = &patterns[i];
		struct adv_pattern *p;

		/* Pattern must lie entirely within the extended AD data. */
		if (src->offset >= HCI_MAX_EXT_AD_LENGTH ||
		    src->length > HCI_MAX_EXT_AD_LENGTH ||
		    (src->offset + src->length) > HCI_MAX_EXT_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = src->ad_type;
		p->offset = src->offset;
		p->length = src->length;
		memcpy(p->value, src->value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
5358 
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler: build a monitor with default
 * RSSI thresholds and hand it to __add_adv_patterns_monitor().
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;

	BT_DBG("request for %s", hdev->name);

	/* Need at least one pattern after the fixed header, and the total
	 * length must match the advertised pattern count exactly. The
	 * short-length check must come first so pattern_count is only read
	 * from a sufficiently large buffer.
	 */
	if (len <= sizeof(*cp) ||
	    len != sizeof(*cp) +
		   cp->pattern_count * sizeof(struct mgmt_adv_pattern)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* This command variant carries no RSSI thresholds: use defaults. */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	/* Ownership of m (possibly NULL) passes to the helper, which
	 * frees it on failure.
	 */
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5395 
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: like
 * add_adv_patterns_monitor() but with caller-provided RSSI thresholds.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* Must carry at least one pattern after the fixed header. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Total length must match the advertised pattern count exactly. */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	/* Ownership of m (possibly NULL) passes to the helper, which
	 * frees it on failure.
	 */
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5432 
/* Completion callback for mgmt_remove_adv_monitor_sync(): report the
 * removal result to userspace and refresh passive scanning.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp;

	/* Command was cancelled: nothing to report or free here. */
	if (status == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	cp = cmd->param;

	/* Echo back the requested handle (kept little-endian). */
	rp.monitor_handle = cp->monitor_handle;

	if (!status) {
		mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5462 
mgmt_remove_adv_monitor_sync(struct hci_dev * hdev,void * data)5463 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5464 {
5465 	struct mgmt_pending_cmd *cmd = data;
5466 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5467 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5468 
5469 	if (!handle)
5470 		return hci_remove_all_adv_monitor(hdev);
5471 
5472 	return hci_remove_single_adv_monitor(hdev, handle);
5473 }
5474 
/* MGMT_OP_REMOVE_ADV_MONITOR handler: queue removal of one monitor
 * (or all, for handle 0) on the cmd_sync workqueue.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Don't race with a pending LE toggle or monitor addition. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	/* mgmt_pending_new() (not _add) keeps the cmd off the pending
	 * list; it is freed in the completion callback.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		mgmt_pending_free(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5519 
/* Completion callback for read_local_oob_data_sync(): parse the HCI
 * response skb and forward local OOB hash/randomizer values to userspace.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Even when cmd_sync reports success, the skb itself may signal
	 * failure: missing, an ERR_PTR, or a non-zero HCI status byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy response: P-192 hash/randomizer only. */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields from the reply size. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended response: both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	/* Only free a real skb, never an ERR_PTR. */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5586 
read_local_oob_data_sync(struct hci_dev * hdev,void * data)5587 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5588 {
5589 	struct mgmt_pending_cmd *cmd = data;
5590 
5591 	if (bredr_sc_enabled(hdev))
5592 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5593 	else
5594 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5595 
5596 	if (IS_ERR(cmd->skb))
5597 		return PTR_ERR(cmd->skb);
5598 	else
5599 		return 0;
5600 }
5601 
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: queue an HCI read of the local
 * OOB pairing data; requires a powered, SSP-capable controller.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Unlisted pending cmd (mgmt_pending_new); freed in the
	 * completion callback or below on queueing failure.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5643 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store a peer's OOB pairing data.
 * The command comes in two sizes: the legacy form with P-192 values only,
 * and the extended form with both P-192 and P-256 values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 hash/randomizer, BR/EDR only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: both P-192 and P-256 values; zeroed
		 * values selectively disable the corresponding set.
		 */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5751 
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drop stored remote OOB data
 * for one BR/EDR address, or for all of them when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only BR/EDR addresses are valid for stored OOB data. */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Wildcard address clears every stored entry. */
		hci_remote_oob_data_clear(hdev);
	} else if (hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type) < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
5788 
/* Complete whichever start-discovery variant is pending for @hdev with
 * the given HCI status.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	static const u16 ops[] = {
		MGMT_OP_START_DISCOVERY,
		MGMT_OP_START_SERVICE_DISCOVERY,
		MGMT_OP_START_LIMITED_DISCOVERY,
	};
	struct mgmt_pending_cmd *cmd = NULL;
	size_t i;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* At most one of the three variants can be pending. */
	for (i = 0; !cmd && i < ARRAY_SIZE(ops); i++)
		cmd = pending_find(ops[i], hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
5811 
discovery_type_is_valid(struct hci_dev * hdev,uint8_t type,uint8_t * mgmt_status)5812 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5813 				    uint8_t *mgmt_status)
5814 {
5815 	switch (type) {
5816 	case DISCOV_TYPE_LE:
5817 		*mgmt_status = mgmt_le_support(hdev);
5818 		if (*mgmt_status)
5819 			return false;
5820 		break;
5821 	case DISCOV_TYPE_INTERLEAVED:
5822 		*mgmt_status = mgmt_le_support(hdev);
5823 		if (*mgmt_status)
5824 			return false;
5825 		fallthrough;
5826 	case DISCOV_TYPE_BREDR:
5827 		*mgmt_status = mgmt_bredr_support(hdev);
5828 		if (*mgmt_status)
5829 			return false;
5830 		break;
5831 	default:
5832 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5833 		return false;
5834 	}
5835 
5836 	return true;
5837 }
5838 
/* Completion callback for start_discovery_sync(): reply to the pending
 * start-discovery command and update the discovery state machine.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	/* Bail out if the command is no longer on the pending list (it
	 * may already have been completed through another path).
	 */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	/* Reply with the single leading byte of the request (the
	 * discovery type) as return parameter.
	 */
	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5860 
/* cmd_sync work: kick off the actual discovery procedure. @data is the
 * pending cmd and is unused here.
 */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5865 
/* Common implementation of MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: validate state and type, then queue
 * the discovery start on the cmd_sync workqueue.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and not while a periodic
	 * inquiry is active.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* The final reply is sent from start_discovery_complete(); err
	 * is 0 here from the successful hci_cmd_sync_queue() call.
	 */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5936 
/* MGMT_OP_START_DISCOVERY handler: thin wrapper around the shared
 * implementation.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5943 
/* MGMT_OP_START_LIMITED_DISCOVERY handler: same flow as regular
 * discovery, but the shared implementation sets discovery.limited.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5951 
/* Handle MGMT_OP_START_SERVICE_DISCOVERY.
 *
 * Like Start Discovery, but results are filtered by an RSSI threshold
 * and an optional list of 128-bit (16 byte) service UUIDs supplied in
 * the command payload.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound on the UUID count that keeps expected_len (a u16)
	 * from overflowing below.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery operation may be active at a time. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The payload must carry exactly uuid_count UUIDs after the
	 * fixed-size header.
	 */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6063 
/* Respond to a pending Stop Discovery command, if any, once the
 * controller reported the given status.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6080 
/* hci_cmd_sync completion callback for MGMT_OP_STOP_DISCOVERY. */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the operation was cancelled or the pending command
	 * was already cleaned up elsewhere.
	 */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo back the single discovery-type byte from the command
	 * parameters.
	 */
	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6098 
/* hci_cmd_sync work callback for MGMT_OP_STOP_DISCOVERY. */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6103 
/* Handle MGMT_OP_STOP_DISCOVERY: abort an active discovery of the
 * requested type.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the running discovery's type. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6148 
/* Handle MGMT_OP_CONFIRM_NAME: user space indicates whether the name of
 * a discovered device is already known, letting the inquiry cache decide
 * if a remote name request is still needed.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	/* Only entries whose name state is still unknown are eligible. */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: drop the entry from the resolve list. */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Name still needed: queue the entry for name resolution. */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
6190 
/* Handle MGMT_OP_BLOCK_DEVICE: add an address to the controller's
 * reject list and notify other mgmt sockets on success.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0) {
		status = MGMT_STATUS_FAILED;
	} else {
		/* Broadcast the event only when the list was updated. */
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6226 
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove an address from the
 * controller's reject list and notify other mgmt sockets on success.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
	} else {
		/* Broadcast the event only when the list was updated. */
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6262 
/* Deferred EIR update after the Device ID record changed. */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6267 
/* Handle MGMT_OP_SET_DEVICE_ID: store the Device ID record and schedule
 * an EIR update so the record becomes visible to remote devices.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	/* Only source values 0x0000-0x0002 are accepted. */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Best effort: the EIR update failure is not reported back. */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
6299 
/* Log the outcome of re-enabling instance advertising. */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6307 
/* hci_cmd_sync completion callback for MGMT_OP_SET_ADVERTISING. */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail all pending Set Advertising commands. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's LE advertising state in the mgmt flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the socket it stored in match. */
	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured one. */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6355 
/* hci_cmd_sync work callback for MGMT_OP_SET_ADVERTISING. */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* A value of 0x02 requests connectable advertising. */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6389 
/* Handle MGMT_OP_SET_ADVERTISING: disable (0x00), enable (0x01) or
 * enable connectable (0x02) LE advertising.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only emit New Settings when a flag actually changed. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising or Set LE operation at a time. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6474 
/* Handle MGMT_OP_SET_STATIC_ADDRESS: store the LE static random address
 * to use while powered off; rejected while the controller is powered.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY clears the static address. Anything else must be a
	 * valid static random address: not BDADDR_NONE, and with the two
	 * most significant bits set.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY) &&
	    (!bacmp(&cp->bdaddr, BDADDR_NONE) ||
	     (cp->bdaddr.b[5] & 0xc0) != 0xc0))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6518 
/* Handle MGMT_OP_SET_SCAN_PARAMS: update the LE scan interval and
 * window used for passive scanning.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);
	window = __le16_to_cpu(cp->window);

	/* Both values must lie within 0x0004-0x4000 and the scan window
	 * cannot exceed the scan interval.
	 */
	if (interval < 0x0004 || interval > 0x4000 ||
	    window < 0x0004 || window > 0x4000 || window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6567 
/* hci_cmd_sync completion callback for MGMT_OP_SET_FAST_CONNECTABLE. */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		/* Only update the flag once the controller accepted the
		 * change.
		 */
		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6591 
write_fast_connectable_sync(struct hci_dev * hdev,void * data)6592 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6593 {
6594 	struct mgmt_pending_cmd *cmd = data;
6595 	struct mgmt_mode *cp = cmd->param;
6596 
6597 	return hci_write_fast_connectable_sync(hdev, cp->val);
6598 }
6599 
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: toggle the fast connectable
 * page scan setting on a BR/EDR capable controller.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires BR/EDR and a controller of Bluetooth 1.2 or later. */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No-op if the setting already matches. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off only the stored flag is toggled; no HCI
	 * traffic is needed.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6655 
/* hci_cmd_sync completion callback for MGMT_OP_SET_BREDR. */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed (set_bredr set it optimistically before queuing).
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6678 
set_bredr_sync(struct hci_dev * hdev,void * data)6679 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6680 {
6681 	int status;
6682 
6683 	status = hci_write_fast_connectable_sync(hdev, false);
6684 
6685 	if (!status)
6686 		status = hci_update_scan_sync(hdev);
6687 
6688 	/* Since only the advertising data flags will change, there
6689 	 * is no need to update the scan response data.
6690 	 */
6691 	if (!status)
6692 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6693 
6694 	return status;
6695 }
6696 
/* Handle MGMT_OP_SET_BREDR: enable or disable BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No-op if the setting already matches. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears settings that only make
		 * sense with BR/EDR support.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6796 
/* hci_cmd_sync completion callback for MGMT_OP_SET_SECURE_CONN. */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	/* 0x00 = off, 0x01 = SC enabled, 0x02 = SC-only mode. */
	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6834 
/* hci_cmd_sync work callback for MGMT_OP_SET_SECURE_CONN. */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val — presumably so the sync helper does not
	 * short-circuit when it considers the setting unchanged; confirm
	 * against hci_write_sc_support_sync().
	 */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}
6846 
/* Handle MGMT_OP_SET_SECURE_CONN: 0x00 disables secure connections,
 * 0x01 enables it and 0x02 additionally enforces SC-only mode.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC capable BR/EDR controller, SSP must be enabled first. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Without a powered, SC capable BR/EDR controller only the stored
	 * flags change; no HCI communication is needed.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No-op if both the enabled and SC-only state already match. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6927 
/* Handle MGMT_OP_SET_DEBUG_KEYS: 0x00 clears HCI_KEEP_DEBUG_KEYS,
 * non-zero sets it; 0x02 additionally sets HCI_USE_DEBUG_KEYS and
 * enables the controller's SSP debug mode.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Propagate the debug mode to the controller when SSP is active. */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6974 
/* Handle MGMT_OP_SET_PRIVACY: configure LE privacy (use of Resolvable
 * Private Addresses) and store the local IRK. Only allowed while the
 * controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark the current RPA as expired. */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the stored IRK when privacy is disabled. */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7031 
irk_is_valid(struct mgmt_irk_info * irk)7032 static bool irk_is_valid(struct mgmt_irk_info *irk)
7033 {
7034 	switch (irk->addr.type) {
7035 	case BDADDR_LE_PUBLIC:
7036 		return true;
7037 
7038 	case BDADDR_LE_RANDOM:
7039 		/* Two most significant bits shall be set */
7040 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7041 			return false;
7042 		return true;
7043 	}
7044 
7045 	return false;
7046 }
7047 
/* Handler for MGMT_OP_LOAD_IRKS.
 *
 * Replaces the kernel's entire IRK store with the list supplied by user
 * space: validates the count against the payload length, validates every
 * entry up front, then clears the existing IRKs and adds the new ones,
 * skipping any key that matches the blocked-key list.
 */
load_irks(struct sock * sk,struct hci_dev * hdev,void * cp_data,u16 len)7048 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7049 		     u16 len)
7050 {
7051 	struct mgmt_cp_load_irks *cp = cp_data;
	/* Largest irk_count that could possibly fit in a u16-sized payload */
7052 	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7053 				   sizeof(struct mgmt_irk_info));
7054 	u16 irk_count, expected_len;
7055 	int i, err;
7056 
7057 	bt_dev_dbg(hdev, "sock %p", sk);
7058 
7059 	if (!lmp_le_capable(hdev))
7060 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7061 				       MGMT_STATUS_NOT_SUPPORTED);
7062 
7063 	irk_count = __le16_to_cpu(cp->irk_count);
7064 	if (irk_count > max_irk_count) {
7065 		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7066 			   irk_count);
7067 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7068 				       MGMT_STATUS_INVALID_PARAMS);
7069 	}
7070 
	/* The declared count must exactly match the received length */
7071 	expected_len = struct_size(cp, irks, irk_count);
7072 	if (expected_len != len) {
7073 		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7074 			   expected_len, len);
7075 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7076 				       MGMT_STATUS_INVALID_PARAMS);
7077 	}
7078 
7079 	bt_dev_dbg(hdev, "irk_count %u", irk_count);
7080 
	/* Validate everything before touching the existing IRK store */
7081 	for (i = 0; i < irk_count; i++) {
7082 		struct mgmt_irk_info *key = &cp->irks[i];
7083 
7084 		if (!irk_is_valid(key))
7085 			return mgmt_cmd_status(sk, hdev->id,
7086 					       MGMT_OP_LOAD_IRKS,
7087 					       MGMT_STATUS_INVALID_PARAMS);
7088 	}
7089 
7090 	hci_dev_lock(hdev);
7091 
7092 	hci_smp_irks_clear(hdev);
7093 
7094 	for (i = 0; i < irk_count; i++) {
7095 		struct mgmt_irk_info *irk = &cp->irks[i];
7096 
		/* Administratively blocked keys are skipped, not rejected */
7097 		if (hci_is_blocked_key(hdev,
7098 				       HCI_BLOCKED_KEY_TYPE_IRK,
7099 				       irk->val)) {
7100 			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7101 				    &irk->addr.bdaddr);
7102 			continue;
7103 		}
7104 
7105 		hci_add_irk(hdev, &irk->addr.bdaddr,
7106 			    le_addr_type(irk->addr.type), irk->val,
7107 			    BDADDR_ANY);
7108 	}
7109 
	/* User space that loads IRKs is expected to handle RPA resolution */
7110 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7111 
7112 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7113 
7114 	hci_dev_unlock(hdev);
7115 
7116 	return err;
7117 }
7118 
ltk_is_valid(struct mgmt_ltk_info * key)7119 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7120 {
7121 	if (key->initiator != 0x00 && key->initiator != 0x01)
7122 		return false;
7123 
7124 	switch (key->addr.type) {
7125 	case BDADDR_LE_PUBLIC:
7126 		return true;
7127 
7128 	case BDADDR_LE_RANDOM:
7129 		/* Two most significant bits shall be set */
7130 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7131 			return false;
7132 		return true;
7133 	}
7134 
7135 	return false;
7136 }
7137 
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS.
 *
 * Replaces the kernel's LTK store with the supplied list. Unlike
 * load_irks(), invalid or blocked entries are skipped with a warning
 * rather than failing the whole command.
 */
load_long_term_keys(struct sock * sk,struct hci_dev * hdev,void * cp_data,u16 len)7138 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7139 			       void *cp_data, u16 len)
7140 {
7141 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Largest key_count that could fit in a u16-sized payload */
7142 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7143 				   sizeof(struct mgmt_ltk_info));
7144 	u16 key_count, expected_len;
7145 	int i, err;
7146 
7147 	bt_dev_dbg(hdev, "sock %p", sk);
7148 
7149 	if (!lmp_le_capable(hdev))
7150 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7151 				       MGMT_STATUS_NOT_SUPPORTED);
7152 
7153 	key_count = __le16_to_cpu(cp->key_count);
7154 	if (key_count > max_key_count) {
7155 		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7156 			   key_count);
7157 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7158 				       MGMT_STATUS_INVALID_PARAMS);
7159 	}
7160 
	/* The declared count must exactly match the received length */
7161 	expected_len = struct_size(cp, keys, key_count);
7162 	if (expected_len != len) {
7163 		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7164 			   expected_len, len);
7165 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7166 				       MGMT_STATUS_INVALID_PARAMS);
7167 	}
7168 
7169 	bt_dev_dbg(hdev, "key_count %u", key_count);
7170 
7171 	hci_dev_lock(hdev);
7172 
7173 	hci_smp_ltks_clear(hdev);
7174 
7175 	for (i = 0; i < key_count; i++) {
7176 		struct mgmt_ltk_info *key = &cp->keys[i];
7177 		u8 type, authenticated;
7178 
7179 		if (hci_is_blocked_key(hdev,
7180 				       HCI_BLOCKED_KEY_TYPE_LTK,
7181 				       key->val)) {
7182 			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7183 				    &key->addr.bdaddr);
7184 			continue;
7185 		}
7186 
7187 		if (!ltk_is_valid(key)) {
7188 			bt_dev_warn(hdev, "Invalid LTK for %pMR",
7189 				    &key->addr.bdaddr);
7190 			continue;
7191 		}
7192 
		/* Map the mgmt key type onto SMP key type + authentication */
7193 		switch (key->type) {
7194 		case MGMT_LTK_UNAUTHENTICATED:
7195 			authenticated = 0x00;
7196 			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7197 			break;
7198 		case MGMT_LTK_AUTHENTICATED:
7199 			authenticated = 0x01;
7200 			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7201 			break;
7202 		case MGMT_LTK_P256_UNAUTH:
7203 			authenticated = 0x00;
7204 			type = SMP_LTK_P256;
7205 			break;
7206 		case MGMT_LTK_P256_AUTH:
7207 			authenticated = 0x01;
7208 			type = SMP_LTK_P256;
7209 			break;
		/* Debug keys fall through into the default "continue", so
		 * they are deliberately never loaded; the two assignments
		 * above the fallthrough are dead stores.
		 */
7210 		case MGMT_LTK_P256_DEBUG:
7211 			authenticated = 0x00;
7212 			type = SMP_LTK_P256_DEBUG;
7213 			fallthrough;
7214 		default:
7215 			continue;
7216 		}
7217 
7218 		hci_add_ltk(hdev, &key->addr.bdaddr,
7219 			    le_addr_type(key->addr.type), type, authenticated,
7220 			    key->val, key->enc_size, key->ediv, key->rand);
7221 	}
7222 
7223 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7224 			   NULL, 0);
7225 
7226 	hci_dev_unlock(hdev);
7227 
7228 	return err;
7229 }
7230 
/* Completion callback for the Get Connection Information command.
 *
 * Replies to the pending mgmt command with the RSSI / TX power values
 * cached on the hci_conn (refreshed by get_conn_info_sync()) on success,
 * or with "invalid" sentinel values on failure, then frees the pending
 * command.
 */
get_conn_info_complete(struct hci_dev * hdev,void * data,int err)7231 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7232 {
7233 	struct mgmt_pending_cmd *cmd = data;
	/* user_data was set to the looked-up connection by the sync handler */
7234 	struct hci_conn *conn = cmd->user_data;
7235 	struct mgmt_cp_get_conn_info *cp = cmd->param;
7236 	struct mgmt_rp_get_conn_info rp;
7237 	u8 status;
7238 
7239 	bt_dev_dbg(hdev, "err %d", err);
7240 
	/* Echo the requested address back in the reply */
7241 	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7242 
7243 	status = mgmt_status(err);
7244 	if (status == MGMT_STATUS_SUCCESS) {
7245 		rp.rssi = conn->rssi;
7246 		rp.tx_power = conn->tx_power;
7247 		rp.max_tx_power = conn->max_tx_power;
7248 	} else {
7249 		rp.rssi = HCI_RSSI_INVALID;
7250 		rp.tx_power = HCI_TX_POWER_INVALID;
7251 		rp.max_tx_power = HCI_TX_POWER_INVALID;
7252 	}
7253 
7254 	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status,
7255 			  &rp, sizeof(rp));
7256 
7257 	mgmt_pending_free(cmd);
7258 }
7259 
/* hci_cmd_sync work for Get Connection Information.
 *
 * Re-resolves the connection (it may have dropped since the command was
 * queued), then refreshes the cached RSSI and, where still unknown, the
 * current and maximum TX power via synchronous HCI reads. Returns 0 on
 * success or an error/status consumed by get_conn_info_complete().
 */
get_conn_info_sync(struct hci_dev * hdev,void * data)7260 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7261 {
7262 	struct mgmt_pending_cmd *cmd = data;
7263 	struct mgmt_cp_get_conn_info *cp = cmd->param;
7264 	struct hci_conn *conn;
7265 	int err;
7266 	__le16   handle;
7267 
7268 	/* Make sure we are still connected */
7269 	if (cp->addr.type == BDADDR_BREDR)
7270 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7271 					       &cp->addr.bdaddr);
7272 	else
7273 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7274 
7275 	if (!conn || conn->state != BT_CONNECTED)
7276 		return MGMT_STATUS_NOT_CONNECTED;
7277 
	/* Stash the connection for the completion callback */
7278 	cmd->user_data = conn;
7279 	handle = cpu_to_le16(conn->handle);
7280 
7281 	/* Refresh RSSI each time */
7282 	err = hci_read_rssi_sync(hdev, handle);
7283 
7284 	/* For LE links TX power does not change thus we don't need to
7285 	 * query for it once value is known.
7286 	 */
7287 	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7288 		     conn->tx_power == HCI_TX_POWER_INVALID))
7289 		err = hci_read_tx_power_sync(hdev, handle, 0x00);
7290 
7291 	/* Max TX power needs to be read only once per connection */
7292 	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7293 		err = hci_read_tx_power_sync(hdev, handle, 0x01);
7294 
7295 	return err;
7296 }
7297 
/* Handler for MGMT_OP_GET_CONN_INFO.
 *
 * Returns RSSI / TX power for an existing connection. Cached values are
 * returned directly while still fresh; otherwise a sync command is queued
 * to re-read them from the controller, and the reply is sent from
 * get_conn_info_complete().
 */
get_conn_info(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7298 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7299 			 u16 len)
7300 {
7301 	struct mgmt_cp_get_conn_info *cp = data;
7302 	struct mgmt_rp_get_conn_info rp;
7303 	struct hci_conn *conn;
7304 	unsigned long conn_info_age;
7305 	int err = 0;
7306 
7307 	bt_dev_dbg(hdev, "sock %p", sk);
7308 
	/* Pre-fill the reply address so every error path can echo it */
7309 	memset(&rp, 0, sizeof(rp));
7310 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7311 	rp.addr.type = cp->addr.type;
7312 
7313 	if (!bdaddr_type_is_valid(cp->addr.type))
7314 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7315 					 MGMT_STATUS_INVALID_PARAMS,
7316 					 &rp, sizeof(rp));
7317 
7318 	hci_dev_lock(hdev);
7319 
7320 	if (!hdev_is_powered(hdev)) {
7321 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7322 					MGMT_STATUS_NOT_POWERED, &rp,
7323 					sizeof(rp));
7324 		goto unlock;
7325 	}
7326 
7327 	if (cp->addr.type == BDADDR_BREDR)
7328 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7329 					       &cp->addr.bdaddr);
7330 	else
7331 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7332 
7333 	if (!conn || conn->state != BT_CONNECTED) {
7334 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7335 					MGMT_STATUS_NOT_CONNECTED, &rp,
7336 					sizeof(rp));
7337 		goto unlock;
7338 	}
7339 
7340 	/* To avoid client trying to guess when to poll again for information we
7341 	 * calculate conn info age as random value between min/max set in hdev.
7342 	 */
7343 	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7344 						 hdev->conn_info_max_age - 1);
7345 
7346 	/* Query controller to refresh cached values if they are too old or were
7347 	 * never read.
7348 	 */
7349 	if (time_after(jiffies, conn->conn_info_timestamp +
7350 		       msecs_to_jiffies(conn_info_age)) ||
7351 	    !conn->conn_info_timestamp) {
7352 		struct mgmt_pending_cmd *cmd;
7353 
7354 		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7355 				       len);
7356 		if (!cmd) {
7357 			err = -ENOMEM;
7358 		} else {
			/* Reply is sent asynchronously by the completion cb */
7359 			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7360 						 cmd, get_conn_info_complete);
7361 		}
7362 
7363 		if (err < 0) {
7364 			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7365 					  MGMT_STATUS_FAILED, &rp, sizeof(rp));
7366 
7367 			if (cmd)
7368 				mgmt_pending_free(cmd);
7369 
7370 			goto unlock;
7371 		}
7372 
7373 		conn->conn_info_timestamp = jiffies;
7374 	} else {
7375 		/* Cache is valid, just reply with values cached in hci_conn */
7376 		rp.rssi = conn->rssi;
7377 		rp.tx_power = conn->tx_power;
7378 		rp.max_tx_power = conn->max_tx_power;
7379 
7380 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7381 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7382 	}
7383 
7384 unlock:
7385 	hci_dev_unlock(hdev);
7386 	return err;
7387 }
7388 
/* Completion callback for the Get Clock Information command.
 *
 * Builds the reply from the values cached by get_clock_info_sync(): the
 * local clock always, plus the piconet clock and accuracy when a specific
 * connection was involved. On error only the echoed address is returned.
 */
get_clock_info_complete(struct hci_dev * hdev,void * data,int err)7389 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7390 {
7391 	struct mgmt_pending_cmd *cmd = data;
7392 	struct mgmt_cp_get_clock_info *cp = cmd->param;
7393 	struct mgmt_rp_get_clock_info rp;
	/* NULL when the request was for the local clock only */
7394 	struct hci_conn *conn = cmd->user_data;
7395 	u8 status = mgmt_status(err);
7396 
7397 	bt_dev_dbg(hdev, "err %d", err);
7398 
7399 	memset(&rp, 0, sizeof(rp));
7400 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7401 	rp.addr.type = cp->addr.type;
7402 
7403 	if (err)
7404 		goto complete;
7405 
7406 	rp.local_clock = cpu_to_le32(hdev->clock);
7407 
7408 	if (conn) {
7409 		rp.piconet_clock = cpu_to_le32(conn->clock);
7410 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7411 	}
7412 
7413 complete:
7414 	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
7415 			  sizeof(rp));
7416 
7417 	mgmt_pending_free(cmd);
7418 }
7419 
/* hci_cmd_sync work for Get Clock Information.
 *
 * First issues HCI Read Clock with a zeroed parameter block (handle 0,
 * which = 0x00, i.e. the local clock; its result is ignored here as a
 * best-effort refresh), then re-validates the connection and reads its
 * piconet clock.
 */
get_clock_info_sync(struct hci_dev * hdev,void * data)7420 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7421 {
7422 	struct mgmt_pending_cmd *cmd = data;
7423 	struct mgmt_cp_get_clock_info *cp = cmd->param;
7424 	struct hci_cp_read_clock hci_cp;
7425 	struct hci_conn *conn;
7426 
7427 	memset(&hci_cp, 0, sizeof(hci_cp));
7428 	hci_read_clock_sync(hdev, &hci_cp);
7429 
7430 	/* Make sure connection still exists */
7431 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7432 	if (!conn || conn->state != BT_CONNECTED)
7433 		return MGMT_STATUS_NOT_CONNECTED;
7434 
	/* Stash the connection for the completion callback */
7435 	cmd->user_data = conn;
7436 	hci_cp.handle = cpu_to_le16(conn->handle);
7437 	hci_cp.which = 0x01; /* Piconet clock */
7438 
7439 	return hci_read_clock_sync(hdev, &hci_cp);
7440 }
7441 
/* Handler for MGMT_OP_GET_CLOCK_INFO.
 *
 * BR/EDR only. With BDADDR_ANY only the local clock is requested;
 * otherwise the named connection must exist and its piconet clock is
 * read as well. The actual reply is sent by get_clock_info_complete().
 */
get_clock_info(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7442 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7443 								u16 len)
7444 {
7445 	struct mgmt_cp_get_clock_info *cp = data;
7446 	struct mgmt_rp_get_clock_info rp;
7447 	struct mgmt_pending_cmd *cmd;
7448 	struct hci_conn *conn;
7449 	int err;
7450 
7451 	bt_dev_dbg(hdev, "sock %p", sk);
7452 
	/* Pre-fill the reply address so every error path can echo it */
7453 	memset(&rp, 0, sizeof(rp));
7454 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7455 	rp.addr.type = cp->addr.type;
7456 
7457 	if (cp->addr.type != BDADDR_BREDR)
7458 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7459 					 MGMT_STATUS_INVALID_PARAMS,
7460 					 &rp, sizeof(rp));
7461 
7462 	hci_dev_lock(hdev);
7463 
7464 	if (!hdev_is_powered(hdev)) {
7465 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7466 					MGMT_STATUS_NOT_POWERED, &rp,
7467 					sizeof(rp));
7468 		goto unlock;
7469 	}
7470 
	/* Non-ANY address: the connection must currently exist */
7471 	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7472 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7473 					       &cp->addr.bdaddr);
7474 		if (!conn || conn->state != BT_CONNECTED) {
7475 			err = mgmt_cmd_complete(sk, hdev->id,
7476 						MGMT_OP_GET_CLOCK_INFO,
7477 						MGMT_STATUS_NOT_CONNECTED,
7478 						&rp, sizeof(rp));
7479 			goto unlock;
7480 		}
7481 	} else {
7482 		conn = NULL;
7483 	}
7484 
7485 	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7486 	if (!cmd)
7487 		err = -ENOMEM;
7488 	else
7489 		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7490 					 get_clock_info_complete);
7491 
7492 	if (err < 0) {
7493 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7494 					MGMT_STATUS_FAILED, &rp, sizeof(rp));
7495 
7496 		if (cmd)
7497 			mgmt_pending_free(cmd);
7498 	}
7499 
7500 
7501 unlock:
7502 	hci_dev_unlock(hdev);
7503 	return err;
7504 }
7505 
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)7506 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7507 {
7508 	struct hci_conn *conn;
7509 
7510 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7511 	if (!conn)
7512 		return false;
7513 
7514 	if (conn->dst_type != type)
7515 		return false;
7516 
7517 	if (conn->state != BT_CONNECTED)
7518 		return false;
7519 
7520 	return true;
7521 }
7522 
7523 /* This function requires the caller holds hdev->lock */
/* Create (or look up) connection parameters for addr/addr_type and move
 * them onto the pending-connection or pending-report list matching the
 * requested auto_connect policy. Returns 0 on success, -EIO when the
 * params entry could not be allocated.
 */
hci_conn_params_set(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type,u8 auto_connect)7524 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7525 			       u8 addr_type, u8 auto_connect)
7526 {
7527 	struct hci_conn_params *params;
7528 
	/* hci_conn_params_add() returns an existing entry if one exists */
7529 	params = hci_conn_params_add(hdev, addr, addr_type);
7530 	if (!params)
7531 		return -EIO;
7532 
7533 	if (params->auto_connect == auto_connect)
7534 		return 0;
7535 
	/* Detach from whichever pending list the entry is currently on */
7536 	hci_pend_le_list_del_init(params);
7537 
7538 	switch (auto_connect) {
7539 	case HCI_AUTO_CONN_DISABLED:
7540 	case HCI_AUTO_CONN_LINK_LOSS:
7541 		/* If auto connect is being disabled when we're trying to
7542 		 * connect to device, keep connecting.
7543 		 */
7544 		if (params->explicit_connect)
7545 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7546 		break;
7547 	case HCI_AUTO_CONN_REPORT:
7548 		if (params->explicit_connect)
7549 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7550 		else
7551 			hci_pend_le_list_add(params, &hdev->pend_le_reports);
7552 		break;
7553 	case HCI_AUTO_CONN_DIRECT:
7554 	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
7555 		if (!is_connected(hdev, addr, addr_type))
7556 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7557 		break;
7558 	}
7559 
7560 	params->auto_connect = auto_connect;
7561 
7562 	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7563 		   addr, addr_type, auto_connect);
7564 
7565 	return 0;
7566 }
7567 
/* Emit a Device Added management event for the given address, address
 * type and auto-connect action, skipping the originating socket sk.
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev = {
		.addr.type = type,
		.action = action,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
7579 
/* Completion callback for Add Device: on success emits the Device Added
 * and Device Flags Changed events, then replies to the pending command
 * with the (possibly error) status and the echoed address.
 */
add_device_complete(struct hci_dev * hdev,void * data,int err)7580 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7581 {
7582 	struct mgmt_pending_cmd *cmd = data;
7583 	struct mgmt_cp_add_device *cp = cmd->param;
7584 
7585 	if (!err) {
7586 		struct hci_conn_params *params;
7587 
		/* Re-look up the params; they may have gone away meanwhile */
7588 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7589 						le_addr_type(cp->addr.type));
7590 
7591 		device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7592 			     cp->action);
7593 		device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7594 				     cp->addr.type, hdev->conn_flags,
7595 				     params ? params->flags : 0);
7596 	}
7597 
7598 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7599 			  mgmt_status(err), &cp->addr, sizeof(cp->addr));
7600 	mgmt_pending_free(cmd);
7601 }
7602 
/* hci_cmd_sync work for Add Device: refresh passive scanning so the new
 * entry on the accept/report lists takes effect.
 */
add_device_sync(struct hci_dev * hdev,void * data)7603 static int add_device_sync(struct hci_dev *hdev, void *data)
7604 {
7605 	return hci_update_passive_scan_sync(hdev);
7606 }
7607 
/* Handler for MGMT_OP_ADD_DEVICE.
 *
 * cp->action: 0x00 = background scan and report, 0x01 = allow incoming
 * connection (BR/EDR) / direct connect (LE), 0x02 = auto-connect (LE
 * only). BR/EDR devices go on the controller accept list; LE devices get
 * conn_params and a passive-scan refresh is queued, with the reply sent
 * from add_device_complete().
 */
add_device(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7608 static int add_device(struct sock *sk, struct hci_dev *hdev,
7609 		      void *data, u16 len)
7610 {
7611 	struct mgmt_pending_cmd *cmd;
7612 	struct mgmt_cp_add_device *cp = data;
7613 	u8 auto_conn, addr_type;
7614 	struct hci_conn_params *params;
7615 	int err;
7616 	u32 current_flags = 0;
7617 	u32 supported_flags;
7618 
7619 	bt_dev_dbg(hdev, "sock %p", sk);
7620 
7621 	if (!bdaddr_type_is_valid(cp->addr.type) ||
7622 	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7623 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7624 					 MGMT_STATUS_INVALID_PARAMS,
7625 					 &cp->addr, sizeof(cp->addr));
7626 
7627 	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7628 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7629 					 MGMT_STATUS_INVALID_PARAMS,
7630 					 &cp->addr, sizeof(cp->addr));
7631 
7632 	hci_dev_lock(hdev);
7633 
7634 	if (cp->addr.type == BDADDR_BREDR) {
7635 		/* Only incoming connections action is supported for now */
7636 		if (cp->action != 0x01) {
7637 			err = mgmt_cmd_complete(sk, hdev->id,
7638 						MGMT_OP_ADD_DEVICE,
7639 						MGMT_STATUS_INVALID_PARAMS,
7640 						&cp->addr, sizeof(cp->addr));
7641 			goto unlock;
7642 		}
7643 
7644 		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7645 						     &cp->addr.bdaddr,
7646 						     cp->addr.type, 0);
7647 		if (err)
7648 			goto unlock;
7649 
7650 		hci_update_scan(hdev);
7651 
7652 		goto added;
7653 	}
7654 
7655 	addr_type = le_addr_type(cp->addr.type);
7656 
	/* Map the mgmt action onto an auto-connect policy */
7657 	if (cp->action == 0x02)
7658 		auto_conn = HCI_AUTO_CONN_ALWAYS;
7659 	else if (cp->action == 0x01)
7660 		auto_conn = HCI_AUTO_CONN_DIRECT;
7661 	else
7662 		auto_conn = HCI_AUTO_CONN_REPORT;
7663 
7664 	/* Kernel internally uses conn_params with resolvable private
7665 	 * address, but Add Device allows only identity addresses.
7666 	 * Make sure it is enforced before calling
7667 	 * hci_conn_params_lookup.
7668 	 */
7669 	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7670 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7671 					MGMT_STATUS_INVALID_PARAMS,
7672 					&cp->addr, sizeof(cp->addr));
7673 		goto unlock;
7674 	}
7675 
7676 	/* If the connection parameters don't exist for this device,
7677 	 * they will be created and configured with defaults.
7678 	 */
7679 	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7680 				auto_conn) < 0) {
7681 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7682 					MGMT_STATUS_FAILED, &cp->addr,
7683 					sizeof(cp->addr));
7684 		goto unlock;
7685 	} else {
7686 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7687 						addr_type);
7688 		if (params)
7689 			current_flags = params->flags;
7690 	}
7691 
7692 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7693 	if (!cmd) {
7694 		err = -ENOMEM;
7695 		goto unlock;
7696 	}
7697 
	/* Reply is sent asynchronously by add_device_complete() */
7698 	err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
7699 				 add_device_complete);
7700 	if (err < 0) {
7701 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7702 					MGMT_STATUS_FAILED, &cp->addr,
7703 					sizeof(cp->addr));
7704 		mgmt_pending_free(cmd);
7705 	}
7706 
7707 	goto unlock;
7708 
	/* BR/EDR path: reply synchronously with success */
7709 added:
7710 	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7711 	supported_flags = hdev->conn_flags;
7712 	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7713 			     supported_flags, current_flags);
7714 
7715 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7716 				MGMT_STATUS_SUCCESS, &cp->addr,
7717 				sizeof(cp->addr));
7718 
7719 unlock:
7720 	hci_dev_unlock(hdev);
7721 	return err;
7722 }
7723 
/* Emit a Device Removed management event for the given address and
 * address type, skipping the originating socket sk.
 */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev = {
		.addr.type = type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
7734 
/* hci_cmd_sync work for Remove Device: refresh passive scanning so the
 * removed entries no longer influence scanning/connecting.
 */
remove_device_sync(struct hci_dev * hdev,void * data)7735 static int remove_device_sync(struct hci_dev *hdev, void *data)
7736 {
7737 	return hci_update_passive_scan_sync(hdev);
7738 }
7739 
/* Handler for MGMT_OP_REMOVE_DEVICE.
 *
 * With a specific address, removes it from the BR/EDR accept list or
 * frees its LE conn_params. With BDADDR_ANY (and type 0x00), wipes the
 * whole accept list and all non-disabled LE conn_params; entries that are
 * being explicitly connected are kept but demoted to
 * HCI_AUTO_CONN_EXPLICIT.
 */
remove_device(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7740 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7741 			 void *data, u16 len)
7742 {
7743 	struct mgmt_cp_remove_device *cp = data;
7744 	int err;
7745 
7746 	bt_dev_dbg(hdev, "sock %p", sk);
7747 
7748 	hci_dev_lock(hdev);
7749 
	/* Non-ANY address: remove one specific device */
7750 	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7751 		struct hci_conn_params *params;
7752 		u8 addr_type;
7753 
7754 		if (!bdaddr_type_is_valid(cp->addr.type)) {
7755 			err = mgmt_cmd_complete(sk, hdev->id,
7756 						MGMT_OP_REMOVE_DEVICE,
7757 						MGMT_STATUS_INVALID_PARAMS,
7758 						&cp->addr, sizeof(cp->addr));
7759 			goto unlock;
7760 		}
7761 
7762 		if (cp->addr.type == BDADDR_BREDR) {
7763 			err = hci_bdaddr_list_del(&hdev->accept_list,
7764 						  &cp->addr.bdaddr,
7765 						  cp->addr.type);
7766 			if (err) {
7767 				err = mgmt_cmd_complete(sk, hdev->id,
7768 							MGMT_OP_REMOVE_DEVICE,
7769 							MGMT_STATUS_INVALID_PARAMS,
7770 							&cp->addr,
7771 							sizeof(cp->addr));
7772 				goto unlock;
7773 			}
7774 
7775 			hci_update_scan(hdev);
7776 
7777 			device_removed(sk, hdev, &cp->addr.bdaddr,
7778 				       cp->addr.type);
7779 			goto complete;
7780 		}
7781 
7782 		addr_type = le_addr_type(cp->addr.type);
7783 
7784 		/* Kernel internally uses conn_params with resolvable private
7785 		 * address, but Remove Device allows only identity addresses.
7786 		 * Make sure it is enforced before calling
7787 		 * hci_conn_params_lookup.
7788 		 */
7789 		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7790 			err = mgmt_cmd_complete(sk, hdev->id,
7791 						MGMT_OP_REMOVE_DEVICE,
7792 						MGMT_STATUS_INVALID_PARAMS,
7793 						&cp->addr, sizeof(cp->addr));
7794 			goto unlock;
7795 		}
7796 
7797 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7798 						addr_type);
7799 		if (!params) {
7800 			err = mgmt_cmd_complete(sk, hdev->id,
7801 						MGMT_OP_REMOVE_DEVICE,
7802 						MGMT_STATUS_INVALID_PARAMS,
7803 						&cp->addr, sizeof(cp->addr));
7804 			goto unlock;
7805 		}
7806 
		/* Disabled/explicit entries were not created via Add Device
		 * and therefore cannot be removed through this command.
		 */
7807 		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7808 		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7809 			err = mgmt_cmd_complete(sk, hdev->id,
7810 						MGMT_OP_REMOVE_DEVICE,
7811 						MGMT_STATUS_INVALID_PARAMS,
7812 						&cp->addr, sizeof(cp->addr));
7813 			goto unlock;
7814 		}
7815 
7816 		hci_conn_params_free(params);
7817 
7818 		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7819 	} else {
		/* BDADDR_ANY: remove everything */
7820 		struct hci_conn_params *p, *tmp;
7821 		struct bdaddr_list *b, *btmp;
7822 
7823 		if (cp->addr.type) {
7824 			err = mgmt_cmd_complete(sk, hdev->id,
7825 						MGMT_OP_REMOVE_DEVICE,
7826 						MGMT_STATUS_INVALID_PARAMS,
7827 						&cp->addr, sizeof(cp->addr));
7828 			goto unlock;
7829 		}
7830 
7831 		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7832 			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7833 			list_del(&b->list);
7834 			kfree(b);
7835 		}
7836 
7837 		hci_update_scan(hdev);
7838 
7839 		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7840 			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7841 				continue;
7842 			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep params alive for an in-flight explicit connect */
7843 			if (p->explicit_connect) {
7844 				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7845 				continue;
7846 			}
7847 			hci_conn_params_free(p);
7848 		}
7849 
7850 		bt_dev_dbg(hdev, "All LE connection parameters were removed");
7851 	}
7852 
7853 	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7854 
7855 complete:
7856 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7857 				MGMT_STATUS_SUCCESS, &cp->addr,
7858 				sizeof(cp->addr));
7859 unlock:
7860 	hci_dev_unlock(hdev);
7861 	return err;
7862 }
7863 
/* Handler for MGMT_OP_LOAD_CONN_PARAM.
 *
 * Loads stored LE connection parameters (interval/latency/timeout) for a
 * list of devices. Entries with invalid address types or out-of-range
 * values are logged and skipped rather than failing the whole command.
 */
load_conn_param(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7864 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7865 			   u16 len)
7866 {
7867 	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest param_count that could fit in a u16-sized payload */
7868 	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7869 				     sizeof(struct mgmt_conn_param));
7870 	u16 param_count, expected_len;
7871 	int i;
7872 
7873 	if (!lmp_le_capable(hdev))
7874 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7875 				       MGMT_STATUS_NOT_SUPPORTED);
7876 
7877 	param_count = __le16_to_cpu(cp->param_count);
7878 	if (param_count > max_param_count) {
7879 		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7880 			   param_count);
7881 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7882 				       MGMT_STATUS_INVALID_PARAMS);
7883 	}
7884 
	/* The declared count must exactly match the received length */
7885 	expected_len = struct_size(cp, params, param_count);
7886 	if (expected_len != len) {
7887 		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7888 			   expected_len, len);
7889 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7890 				       MGMT_STATUS_INVALID_PARAMS);
7891 	}
7892 
7893 	bt_dev_dbg(hdev, "param_count %u", param_count);
7894 
7895 	hci_dev_lock(hdev);
7896 
	/* Drop params that only existed as disabled placeholders */
7897 	hci_conn_params_clear_disabled(hdev);
7898 
7899 	for (i = 0; i < param_count; i++) {
7900 		struct mgmt_conn_param *param = &cp->params[i];
7901 		struct hci_conn_params *hci_param;
7902 		u16 min, max, latency, timeout;
7903 		u8 addr_type;
7904 
7905 		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7906 			   param->addr.type);
7907 
7908 		if (param->addr.type == BDADDR_LE_PUBLIC) {
7909 			addr_type = ADDR_LE_DEV_PUBLIC;
7910 		} else if (param->addr.type == BDADDR_LE_RANDOM) {
7911 			addr_type = ADDR_LE_DEV_RANDOM;
7912 		} else {
7913 			bt_dev_err(hdev, "ignoring invalid connection parameters");
7914 			continue;
7915 		}
7916 
7917 		min = le16_to_cpu(param->min_interval);
7918 		max = le16_to_cpu(param->max_interval);
7919 		latency = le16_to_cpu(param->latency);
7920 		timeout = le16_to_cpu(param->timeout);
7921 
7922 		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7923 			   min, max, latency, timeout);
7924 
7925 		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7926 			bt_dev_err(hdev, "ignoring invalid connection parameters");
7927 			continue;
7928 		}
7929 
7930 		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7931 						addr_type);
7932 		if (!hci_param) {
7933 			bt_dev_err(hdev, "failed to add connection parameters");
7934 			continue;
7935 		}
7936 
7937 		hci_param->conn_min_interval = min;
7938 		hci_param->conn_max_interval = max;
7939 		hci_param->conn_latency = latency;
7940 		hci_param->supervision_timeout = timeout;
7941 	}
7942 
7943 	hci_dev_unlock(hdev);
7944 
7945 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7946 				 NULL, 0);
7947 }
7948 
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG.
 *
 * Toggles the HCI_EXT_CONFIGURED flag on controllers that declare the
 * EXTERNAL_CONFIG quirk, while powered off. If the change flips the
 * device between configured and unconfigured, the management index is
 * re-registered accordingly (and power-on is scheduled when the device
 * becomes configured).
 */
set_external_config(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7949 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7950 			       void *data, u16 len)
7951 {
7952 	struct mgmt_cp_set_external_config *cp = data;
7953 	bool changed;
7954 	int err;
7955 
7956 	bt_dev_dbg(hdev, "sock %p", sk);
7957 
7958 	if (hdev_is_powered(hdev))
7959 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7960 				       MGMT_STATUS_REJECTED);
7961 
7962 	if (cp->config != 0x00 && cp->config != 0x01)
7963 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7964 				         MGMT_STATUS_INVALID_PARAMS);
7965 
7966 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7967 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7968 				       MGMT_STATUS_NOT_SUPPORTED);
7969 
7970 	hci_dev_lock(hdev);
7971 
7972 	if (cp->config)
7973 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7974 	else
7975 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7976 
7977 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7978 	if (err < 0)
7979 		goto unlock;
7980 
7981 	if (!changed)
7982 		goto unlock;
7983 
7984 	err = new_options(hdev, sk);
7985 
	/* The UNCONFIGURED flag no longer matches reality: re-register the
	 * index under its new configured/unconfigured identity.
	 */
7986 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7987 		mgmt_index_removed(hdev);
7988 
7989 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7990 			hci_dev_set_flag(hdev, HCI_CONFIG);
7991 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7992 
7993 			queue_work(hdev->req_workqueue, &hdev->power_on);
7994 		} else {
7995 			set_bit(HCI_RAW, &hdev->flags);
7996 			mgmt_index_added(hdev);
7997 		}
7998 	}
7999 
8000 unlock:
8001 	hci_dev_unlock(hdev);
8002 	return err;
8003 }
8004 
/* Handle the Set Public Address management command.
 *
 * Stores the requested address in hdev->public_addr so it can be
 * programmed through the driver's set_bdaddr callback at power on.
 * If the new address completes the controller's configuration, the
 * device is moved to the configured index list and a power-on is
 * scheduled.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The address can only be changed while powered off. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY is not a valid public address. */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Without a driver callback there is no way to program the
	 * address into the controller.
	 */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Setting the address may have completed the configuration;
	 * if so, announce the controller as configured and power it on.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8056 
/* Completion handler for the HCI Read Local OOB (Extended) Data request
 * issued on behalf of a pending Read Local OOB Extended Data command.
 *
 * Translates the controller reply — legacy layout or the extended
 * secure-connections layout, depending on bredr_sc_enabled() — into
 * EIR-formatted hash/randomizer fields, sends the command response and,
 * on success, broadcasts a Local OOB Data Updated event to the other
 * sockets that opted in.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Nothing to do if the request was cancelled or this is no longer
	 * the tracked pending command.
	 */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* If the request itself did not fail, derive the status from the
	 * reply skb (missing, error pointer, or its HCI status byte).
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: P-192 hash and randomizer only. */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-dev field (5) + two 18-byte fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections reply: always P-256 values, plus the
		 * P-192 pair unless the controller is in SC-only mode.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure an empty EIR payload is still sent back. */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Notify every other listener except the issuing socket. */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8180 
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)8181 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8182 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8183 {
8184 	struct mgmt_pending_cmd *cmd;
8185 	int err;
8186 
8187 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8188 			       cp, sizeof(*cp));
8189 	if (!cmd)
8190 		return -ENOMEM;
8191 
8192 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8193 				 read_local_oob_ext_data_complete);
8194 
8195 	if (err < 0) {
8196 		mgmt_pending_remove(cmd);
8197 		return err;
8198 	}
8199 
8200 	return 0;
8201 }
8202 
/* Handle the Read Local OOB Extended Data management command.
 *
 * For BR/EDR with SSP enabled the hash/randomizer must be read from the
 * controller, so the work is deferred to read_local_ssp_oob_req() and
 * answered asynchronously. For LE (and BR/EDR without SSP) the EIR
 * payload is built synchronously here: address, role, optional SC
 * confirm/random values and flags.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-compute the worst-case EIR length for the reply buffer;
	 * eir_len is recomputed below as fields are actually appended.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP data lives in the controller; answer
			 * asynchronously via the sync-command machinery.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will actually see; byte 7 of
		 * addr encodes the address type (0x01 = random/static).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the new OOB data to the other opted-in listeners. */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8363 
get_supported_adv_flags(struct hci_dev * hdev)8364 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8365 {
8366 	u32 flags = 0;
8367 
8368 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8369 	flags |= MGMT_ADV_FLAG_DISCOV;
8370 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8371 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8372 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8373 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8374 	flags |= MGMT_ADV_PARAM_DURATION;
8375 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8376 	flags |= MGMT_ADV_PARAM_INTERVALS;
8377 	flags |= MGMT_ADV_PARAM_TX_POWER;
8378 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8379 
8380 	/* In extended adv TX_POWER returned from Set Adv Param
8381 	 * will be always valid.
8382 	 */
8383 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8384 		flags |= MGMT_ADV_FLAG_TX_POWER;
8385 
8386 	if (ext_adv_capable(hdev)) {
8387 		flags |= MGMT_ADV_FLAG_SEC_1M;
8388 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8389 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8390 
8391 		if (le_2m_capable(hdev))
8392 			flags |= MGMT_ADV_FLAG_SEC_2M;
8393 
8394 		if (le_coded_capable(hdev))
8395 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8396 	}
8397 
8398 	return flags;
8399 }
8400 
/* Handle the Read Advertising Features management command.
 *
 * Reports the supported advertising flags, data-length limits, the
 * maximum number of advertising instances and the identifiers of the
 * currently registered instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One extra byte per registered instance for the instance list. */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		/* NOTE(review): the test below compares against
		 * adv_instance_cnt, not le_num_of_adv_sets as the comment
		 * says — confirm which limit is intended.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8455 
/* Number of bytes the local-name EIR field would occupy for this
 * controller, computed by rendering it into a scratch buffer.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}
8462 
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)8463 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8464 			   bool is_adv_data)
8465 {
8466 	u8 max_len = max_adv_len(hdev);
8467 
8468 	if (is_adv_data) {
8469 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8470 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8471 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8472 			max_len -= 3;
8473 
8474 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8475 			max_len -= 3;
8476 	} else {
8477 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8478 			max_len -= calculate_name_len(hdev);
8479 
8480 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8481 			max_len -= 4;
8482 	}
8483 
8484 	return max_len;
8485 }
8486 
/* True when the kernel manages the Flags EIR field for this instance,
 * so user data must not contain one.
 */
static bool flags_managed(u32 adv_flags)
{
	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
			    MGMT_ADV_FLAG_LIMITED_DISCOV |
			    MGMT_ADV_FLAG_MANAGED_FLAGS);
}
8493 
/* True when the kernel manages the TX power EIR field. */
static bool tx_power_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
}
8498 
/* True when the kernel appends the local name itself. */
static bool name_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
}
8503 
/* True when the kernel appends the appearance field itself. */
static bool appearance_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
}
8508 
/* Validate user-supplied advertising/scan-response TLV data.
 *
 * Rejects payloads that exceed the available space, contain a field the
 * kernel manages itself (flags, TX power, name, appearance — depending
 * on adv_flags), or whose last field length runs past the end of the
 * buffer.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		/* A zero length field terminates nothing; skip it. */
		if (!cur_len)
			continue;

		/* data[i + 1] is the field type byte.
		 * NOTE(review): it is read before the bounds check below;
		 * presumably safe because the buffer sits inside a larger
		 * mgmt packet — confirm against the caller.
		 */
		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
			return false;
	}

	return true;
}
8553 
/* Validate the flags of an Add Advertising / Add Ext Adv Params request:
 * every requested flag must be supported and at most one secondary-PHY
 * (SEC_*) flag may be selected.
 */
static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
{
	u32 supported_flags, phy_flags;

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
	/* phy_flags & -phy_flags isolates the lowest set bit, so the XOR
	 * is non-zero exactly when more than one SEC_* bit is set.
	 */
	if (adv_flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return false;

	return true;
}
8569 
/* Advertising changes are refused while a Set LE command is pending,
 * since toggling LE rewrites the advertising state.
 */
static bool adv_busy(struct hci_dev *hdev)
{
	return pending_find(MGMT_OP_SET_LE, hdev);
}
8574 
/* Settle all pending advertising instances after an add operation.
 *
 * On success (err == 0) every pending instance is simply marked as
 * committed. On failure each pending instance is torn down: the
 * rotation timeout is cancelled if it was the current instance, the
 * instance is removed and an Advertising Removed event is emitted.
 *
 * Note: the @instance parameter is part of the callback signature but
 * is not consulted here — all pending instances are processed. The
 * former inner variable shadowed it (a -Wshadow hazard); it is renamed
 * to rem_instance, with no behavior change.
 */
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 rem_instance;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		rem_instance = adv->instance;

		if (hdev->cur_adv_instance == rem_instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, rem_instance);
		mgmt_advertising_removed(sk, hdev, rem_instance);
	}

	hci_dev_unlock(hdev);
}
8606 
add_advertising_complete(struct hci_dev * hdev,void * data,int err)8607 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8608 {
8609 	struct mgmt_pending_cmd *cmd = data;
8610 	struct mgmt_cp_add_advertising *cp = cmd->param;
8611 	struct mgmt_rp_add_advertising rp;
8612 
8613 	memset(&rp, 0, sizeof(rp));
8614 
8615 	rp.instance = cp->instance;
8616 
8617 	if (err)
8618 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8619 				mgmt_status(err));
8620 	else
8621 		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8622 				  mgmt_status(err), &rp, sizeof(rp));
8623 
8624 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8625 
8626 	mgmt_pending_free(cmd);
8627 }
8628 
/* hci_cmd_sync work: schedule the advertising instance requested by the
 * pending Add Advertising command.
 */
static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}
8636 
/* Handle the Add Advertising management command.
 *
 * Validates the request (LE support, instance range, payload length,
 * flags, TLV content), registers or replaces the advertising instance,
 * decides which instance should be scheduled next and, when HCI
 * communication is required, queues the scheduling work with
 * add_advertising_complete() as the callback.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instances are numbered 1..le_num_of_adv_sets. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The payload must be exactly header + adv data + scan rsp. */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8771 
/* Completion callback for an Add Extended Advertising Parameters
 * command.
 *
 * On success, replies with the instance, its selected TX power and the
 * remaining space for advertising/scan-response data. On failure the
 * instance is removed (emitting Advertising Removed if it had already
 * been advertising) and a status reply is sent.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* If the instance vanished there is nothing to report on. */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	/* NOTE(review): cmd is the callback's data pointer and appears to
	 * never be NULL here; the guard looks redundant — confirm before
	 * removing.
	 */
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8822 
/* hci_cmd_sync work: program the extended advertising parameters for
 * the instance named in the pending command.
 */
static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}
8830 
/* Handle the Add Extended Advertising Parameters management command.
 *
 * Creates an advertising instance with the requested parameters but no
 * data yet (the data arrives via Add Extended Advertising Data). With
 * extended-advertising controllers the parameters are programmed into
 * the controller asynchronously; otherwise a success reply is sent
 * immediately with default TX power.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instances are numbered 1..le_num_of_adv_sets. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Roll back the instance created above. */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: nothing to program yet; reply with
		 * defaults and the available data space.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8946 
add_ext_adv_data_complete(struct hci_dev * hdev,void * data,int err)8947 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8948 {
8949 	struct mgmt_pending_cmd *cmd = data;
8950 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8951 	struct mgmt_rp_add_advertising rp;
8952 
8953 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8954 
8955 	memset(&rp, 0, sizeof(rp));
8956 
8957 	rp.instance = cp->instance;
8958 
8959 	if (err)
8960 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8961 				mgmt_status(err));
8962 	else
8963 		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8964 				  mgmt_status(err), &rp, sizeof(rp));
8965 
8966 	mgmt_pending_free(cmd);
8967 }
8968 
add_ext_adv_data_sync(struct hci_dev * hdev,void * data)8969 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8970 {
8971 	struct mgmt_pending_cmd *cmd = data;
8972 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8973 	int err;
8974 
8975 	if (ext_adv_capable(hdev)) {
8976 		err = hci_update_adv_data_sync(hdev, cp->instance);
8977 		if (err)
8978 			return err;
8979 
8980 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8981 		if (err)
8982 			return err;
8983 
8984 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8985 	}
8986 
8987 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8988 }
8989 
/* MGMT_OP_ADD_EXT_ADV_DATA handler: sets advertising and scan-response
 * data on an instance previously created via ADD_EXT_ADV_PARAMS and
 * arranges for it to be advertised. On any failure after the lookup the
 * instance is removed again (clear_new_instance) so a half-configured
 * set does not linger.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	/* The instance must already exist */
	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data; scan response data follows the advertising
	 * data inside cp->data.
	 */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9108 
remove_advertising_complete(struct hci_dev * hdev,void * data,int err)9109 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9110 					int err)
9111 {
9112 	struct mgmt_pending_cmd *cmd = data;
9113 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9114 	struct mgmt_rp_remove_advertising rp;
9115 
9116 	bt_dev_dbg(hdev, "err %d", err);
9117 
9118 	memset(&rp, 0, sizeof(rp));
9119 	rp.instance = cp->instance;
9120 
9121 	if (err)
9122 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9123 				mgmt_status(err));
9124 	else
9125 		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9126 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9127 
9128 	mgmt_pending_free(cmd);
9129 }
9130 
remove_advertising_sync(struct hci_dev * hdev,void * data)9131 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9132 {
9133 	struct mgmt_pending_cmd *cmd = data;
9134 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9135 	int err;
9136 
9137 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9138 	if (err)
9139 		return err;
9140 
9141 	if (list_empty(&hdev->adv_instances))
9142 		err = hci_disable_advertising_sync(hdev);
9143 
9144 	return err;
9145 }
9146 
/* MGMT_OP_REMOVE_ADVERTISING handler: remove an advertising instance
 * via the hci_sync machinery. cp->instance == 0 is accepted here and
 * handed through to hci_remove_advertising_sync (presumably meaning
 * "all instances" - confirm there).
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing advertising set */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Don't race with an in-flight SET_LE command */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9194 
/* MGMT_OP_GET_ADV_SIZE_INFO handler: report how much advertising and
 * scan-response payload is available for a given instance and flag
 * combination. Purely informational - no controller interaction.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 adv_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Size info only makes sense on LE capable controllers */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	/* Instance numbers are 1-based and bounded by the controller */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	adv_flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the
	 * specified flags - reject anything outside it.
	 */
	if (adv_flags & ~get_supported_adv_flags(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, adv_flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, adv_flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
9230 
/* Dispatch table for incoming management commands, indexed by opcode
 * (entry 0x0000 is unused), so entry order must stay in sync with the
 * MGMT_OP_* numbering. Each entry gives the handler, the expected
 * parameter size (a minimum when HCI_MGMT_VAR_LEN is set) and optional
 * HCI_MGMT_* flags such as HCI_MGMT_NO_HDEV (no controller index) or
 * HCI_MGMT_UNTRUSTED (allowed for untrusted sockets).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9364 
mgmt_index_added(struct hci_dev * hdev)9365 void mgmt_index_added(struct hci_dev *hdev)
9366 {
9367 	struct mgmt_ev_ext_index ev;
9368 
9369 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9370 		return;
9371 
9372 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9373 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9374 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9375 		ev.type = 0x01;
9376 	} else {
9377 		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9378 				 HCI_MGMT_INDEX_EVENTS);
9379 		ev.type = 0x00;
9380 	}
9381 
9382 	ev.bus = hdev->bus;
9383 
9384 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9385 			 HCI_MGMT_EXT_INDEX_EVENTS);
9386 }
9387 
/* Announce that a controller went away: fail all pending commands with
 * INVALID_INDEX, send the index-removed events and stop timed work
 * that might still reference the device.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };

	/* Raw devices are not exposed through the mgmt interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	/* Fail every pending command (opcode 0 presumably matches all -
	 * see mgmt_pending_foreach)
	 */
	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}
9420 
/* Called when a power-on attempt has finished: on success restore LE
 * actions and passive scanning, then complete any pending
 * MGMT_OP_SET_POWERED commands and broadcast the new settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* Answer all pending SET_POWERED commands with the settings */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* Drop the socket reference stashed in match, if any */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9444 
/* Handle power-off: complete pending SET_POWERED commands, fail all
 * other pending commands, clear the reported class of device and
 * broadcast the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	/* Only report a class change if it was non-zero before */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	/* Drop the socket reference stashed in match, if any */
	if (match.sk)
		sock_put(match.sk);
}
9479 
mgmt_set_powered_failed(struct hci_dev * hdev,int err)9480 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9481 {
9482 	struct mgmt_pending_cmd *cmd;
9483 	u8 status;
9484 
9485 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9486 	if (!cmd)
9487 		return;
9488 
9489 	if (err == -ERFKILL)
9490 		status = MGMT_STATUS_RFKILLED;
9491 	else
9492 		status = MGMT_STATUS_FAILED;
9493 
9494 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9495 
9496 	mgmt_pending_remove(cmd);
9497 }
9498 
/* Forward a newly created BR/EDR link key to user space, with a store
 * hint telling user space whether the key should be persisted.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
9515 
mgmt_ltk_type(struct smp_ltk * ltk)9516 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9517 {
9518 	switch (ltk->type) {
9519 	case SMP_LTK:
9520 	case SMP_LTK_RESPONDER:
9521 		if (ltk->authenticated)
9522 			return MGMT_LTK_AUTHENTICATED;
9523 		return MGMT_LTK_UNAUTHENTICATED;
9524 	case SMP_LTK_P256:
9525 		if (ltk->authenticated)
9526 			return MGMT_LTK_P256_AUTH;
9527 		return MGMT_LTK_P256_UNAUTH;
9528 	case SMP_LTK_P256_DEBUG:
9529 		return MGMT_LTK_P256_DEBUG;
9530 	}
9531 
9532 	return MGMT_LTK_UNAUTHENTICATED;
9533 }
9534 
/* Forward a newly created Long Term Key to user space, with a store
 * hint telling user space whether the key should be persisted.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Only plain SMP_LTK keys are flagged as initiator keys */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9577 
/* Forward a newly distributed Identity Resolving Key to user space,
 * including the RPA it was resolved from.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
9593 
/* Forward a newly distributed Connection Signature Resolving Key to
 * user space, with a store hint for persistence.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9623 
/* Emit MGMT_EV_NEW_CONN_PARAM so user space can decide whether to store
 * the suggested connection parameters for the given device.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	/* Only identity addresses are meaningful to store against */
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	/* Parameters are transmitted little-endian on the wire */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9644 
/* Emit MGMT_EV_DEVICE_CONNECTED, at most once per connection (guarded
 * by HCI_CONN_MGMT_CONNECTED). The event carries either the received
 * LE advertising data or, for BR/EDR, the remote name and class of
 * device as EIR fields.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	/* Only report each connection once */
	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* allocate buff for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	/* Flag locally initiated connections */
	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		/* Only include a non-zero class of device */
		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}
9697 
/* mgmt_pending_foreach callback: emit the Device Unpaired event for a
 * pending MGMT_OP_UNPAIR_DEVICE command and complete it successfully.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	/* 0 == success status */
	cmd->cmd_complete(cmd, 0);
}
9707 
mgmt_powering_down(struct hci_dev * hdev)9708 bool mgmt_powering_down(struct hci_dev *hdev)
9709 {
9710 	struct mgmt_pending_cmd *cmd;
9711 	struct mgmt_mode *cp;
9712 
9713 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9714 	if (!cmd)
9715 		return false;
9716 
9717 	cp = cmd->param;
9718 	if (!cp->val)
9719 		return true;
9720 
9721 	return false;
9722 }
9723 
/* Emit MGMT_EV_DEVICE_DISCONNECTED for connections that user space was
 * told about. Suspend-triggered disconnects are reported with a
 * dedicated reason code.
 *
 * The previous version kept a local "struct sock *sk = NULL" that was
 * never reassigned, together with a dead "if (sk) sock_put(sk)" tail;
 * both were leftovers and have been removed (NULL is passed directly
 * as the skip-socket, exactly as before).
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;

	/* Only report connections user space already knew about */
	if (!mgmt_connected)
		return;

	/* Only ACL and LE links are exposed via the mgmt interface */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), NULL);
}
9750 
/* Handle a failed HCI Disconnect: flush pending UNPAIR_DEVICE commands
 * and complete a pending MGMT_OP_DISCONNECT that matches the address,
 * if there is one.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
			     unpair_device_rsp, hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command that targeted this exact address */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9776 
/* Report a failed connection attempt to user space. */
void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* If the connection had already been reported as connected,
	 * report a disconnection instead of a connect failure.
	 */
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9793 
/* Ask user space for a PIN code for a BR/EDR pairing; 'secure'
 * indicates whether a 16 digit PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
9804 
/* Complete a pending MGMT_OP_PIN_CODE_REPLY with the given HCI status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9817 
/* Complete a pending MGMT_OP_PIN_CODE_NEG_REPLY with the given HCI
 * status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9830 
/* Ask user space to confirm a pairing passkey value; confirm_hint
 * selects the confirmation style presented to the user.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9847 
/* Ask user space to enter a passkey for the given device. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9861 
/* Common completion helper for the user confirm/passkey (neg) reply
 * commands. Returns -ENOENT if no matching command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
9877 
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9884 
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9892 
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9899 
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9907 
/* Emit a Passkey Notify event so userspace can display @passkey during
 * pairing; @entered reports how many digits the remote side has typed.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev = {
		.passkey = __cpu_to_le32(passkey),
		.entered = entered,
	};

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
9923 
/* Notify userspace that authentication with a remote device failed.
 *
 * Emits MGMT_EV_AUTH_FAILED and, if a pairing command (find_pairing())
 * is still pending for this connection, completes it with the same
 * translated status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the socket that issued the pairing command: it receives
	 * the command completion below instead of the event.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9944 
/* Handle completion of an HCI authentication-enable change triggered by
 * the Set Link Security management command.  On failure all pending
 * Set Link Security commands are answered with the error; on success
 * the HCI_LINK_SECURITY flag is synced with the controller state and a
 * New Settings event is emitted if the setting actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH flag into HCI_LINK_SECURITY;
	 * 'changed' is true only if the flag actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp takes a reference on the first responder's socket
	 * (stored in match.sk); drop it here.
	 */
	if (match.sk)
		sock_put(match.sk);
}
9971 
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)9972 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9973 {
9974 	struct cmd_lookup *match = data;
9975 
9976 	if (match->sk == NULL) {
9977 		match->sk = cmd->sk;
9978 		sock_hold(match->sk);
9979 	}
9980 }
9981 
/* Handle completion of a Class of Device update.  On success a Class
 * Of Dev Changed event (3-byte CoD) is broadcast and the extended info
 * is refreshed; the socket of any pending Set Dev Class / Add UUID /
 * Remove UUID command is skipped for the event.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Find the socket that initiated the change (first match wins). */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup,
			     &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	/* Drop the reference taken by sk_lookup(), if any. */
	if (match.sk)
		sock_put(match.sk);
}
10003 
/* Handle completion of a local name change.
 *
 * On success, broadcast the new (name, short_name) pair via a Local
 * Name Changed event.  NOTE(review): assumes @name points to a buffer
 * of at least HCI_MAX_NAME_LENGTH bytes -- confirm against callers.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name change was not requested via mgmt, so store it. */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the requesting socket (if any); it gets the command reply. */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10031 
has_uuid(u8 * uuid,u16 uuid_count,u8 (* uuids)[16])10032 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10033 {
10034 	int i;
10035 
10036 	for (i = 0; i < uuid_count; i++) {
10037 		if (!memcmp(uuid, uuids[i], 16))
10038 			return true;
10039 	}
10040 
10041 	return false;
10042 }
10043 
/* Walk the EIR/advertising data in @eir (length-prefixed fields:
 * eir[0] = field length, eir[1] = AD type, data from eir[2]) and return
 * true if any advertised service UUID matches an entry in @uuids.
 * 16- and 32-bit UUIDs are expanded to 128 bits by patching the
 * little-endian value into bytes 12..15 of the Bluetooth Base UUID.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero field length terminates the data. */
		if (field_len == 0)
			break;

		/* Stop on a truncated field rather than reading past it. */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* UUID data starts at eir[2], 2 bytes per entry. */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 4 bytes per entry, little endian. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs, 16 bytes per entry. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past the length byte plus the field itself. */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10098 
/* Schedule a delayed restart of an ongoing LE scan so that duplicate
 * filtering controllers report fresh results (see is_filter_match()).
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* If the scan would end before the restart delay elapses anyway,
	 * there is no point in restarting it.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
10113 
/* Apply the service-discovery filter (RSSI threshold and/or UUID list)
 * to a found device.  Returns true if the result should be reported.
 * May schedule an LE scan restart as a side effect (see below).
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the EIR/advertising
		 * data and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10158 
/* Notify userspace that a device previously matched by the Advertisement
 * Monitor @handle is no longer being tracked.
 */
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev = {
		.monitor_handle = cpu_to_le16(handle),
		.addr.type = addr_type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}
10171 
/* Re-emit a DEVICE_FOUND event skb as an ADV_MONITOR_DEVICE_FOUND event
 * carrying the matched monitor @handle.  Does not consume @skb; the
 * caller keeps ownership of it.  Allocation failures are silently
 * ignored (best-effort notification).
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* Size of the monitor-specific header that precedes the copied
	 * DEVICE_FOUND payload.
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10201 
/* Route a found-device skb to DEVICE_FOUND and/or ADV_MONITOR_DEVICE_FOUND
 * events depending on why the advertisement was received.
 *
 * Takes ownership of @skb: it is either handed to mgmt_event_skb() or
 * freed before returning.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below: set again if any monitored device still has a
	 * first-match notification outstanding.
	 */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* First match for this device: notify once with the
			 * monitor's real handle.
			 */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Consume the skb: forward it as DEVICE_FOUND or drop it. */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10265 
/* Emit a MESH_DEVICE_FOUND event for an LE advertisement, but only if
 * the AD data (or scan response) contains at least one of the AD types
 * listed in hdev->mesh_ad_types.  An all-zero first entry means no
 * filter is configured and everything is accepted.
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* eir[i] is each field's length, eir[i + 1] its AD type. */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* A zero entry terminates the filter list. */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type found anywhere: drop silently. */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10331 
/* Report a discovered remote device to userspace.
 *
 * Applies the discovery filters (service filter, limited discoverable),
 * builds a DEVICE_FOUND event skb from the EIR/advertising data, scan
 * response and optional class of device, then hands it to the
 * Advertisement Monitor routing logic for delivery.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* LE advertisements additionally feed the mesh path when the
	 * controller is in mesh mode.
	 */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize an EIR Class of Device field when the caller supplied
	 * one and the EIR data doesn't already contain it.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Ownership of skb passes to the monitor routing logic. */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10423 
/* Report the outcome of a remote name request as a DEVICE_FOUND event.
 * When @name is NULL the event carries the NAME_REQUEST_FAILED flag
 * instead of an EIR complete-name field.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	struct sk_buff *skb;
	u32 flags = 0;
	u16 eir_len = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (!name)
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
	else
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}
10452 
/* Broadcast a Discovering event reflecting the current discovery type
 * and whether discovery just started (1) or stopped (0).
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev = {
		.type = hdev->discovery.type,
		.discovering = discovering,
	};

	bt_dev_dbg(hdev, "discovering %u", discovering);

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
10465 
/* Broadcast a Controller Suspend event with the given suspend state. */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev = {
		.suspend_state = state,
	};

	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
10473 
/* Broadcast a Controller Resume event.  @bdaddr identifies the device
 * that caused the wakeup; when it is NULL the address field is zeroed.
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	memset(&ev.addr, 0, sizeof(ev.addr));
	ev.wake_reason = reason;

	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
10489 
/* Management control channel descriptor registered with the HCI socket
 * layer; mgmt_handlers dispatches the MGMT_OP_* commands.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10496 
/* Register the HCI management control channel. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10501 
/* Unregister the HCI management control channel. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10506 
/* Clean up per-socket mgmt state when a control socket goes away:
 * complete (as failed) every outstanding mesh transmission owned by
 * @sk on every registered controller.
 */
void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	/* Read lock is sufficient: the device list is only traversed,
	 * not modified.
	 */
	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Drain this socket's pending mesh transmissions one by
		 * one until mgmt_mesh_next() finds no more.
		 */
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}
10525