/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43
44 #define MGMT_VERSION 1
45 #define MGMT_REVISION 22
46
47 static const u16 mgmt_commands[] = {
48 MGMT_OP_READ_INDEX_LIST,
49 MGMT_OP_READ_INFO,
50 MGMT_OP_SET_POWERED,
51 MGMT_OP_SET_DISCOVERABLE,
52 MGMT_OP_SET_CONNECTABLE,
53 MGMT_OP_SET_FAST_CONNECTABLE,
54 MGMT_OP_SET_BONDABLE,
55 MGMT_OP_SET_LINK_SECURITY,
56 MGMT_OP_SET_SSP,
57 MGMT_OP_SET_HS,
58 MGMT_OP_SET_LE,
59 MGMT_OP_SET_DEV_CLASS,
60 MGMT_OP_SET_LOCAL_NAME,
61 MGMT_OP_ADD_UUID,
62 MGMT_OP_REMOVE_UUID,
63 MGMT_OP_LOAD_LINK_KEYS,
64 MGMT_OP_LOAD_LONG_TERM_KEYS,
65 MGMT_OP_DISCONNECT,
66 MGMT_OP_GET_CONNECTIONS,
67 MGMT_OP_PIN_CODE_REPLY,
68 MGMT_OP_PIN_CODE_NEG_REPLY,
69 MGMT_OP_SET_IO_CAPABILITY,
70 MGMT_OP_PAIR_DEVICE,
71 MGMT_OP_CANCEL_PAIR_DEVICE,
72 MGMT_OP_UNPAIR_DEVICE,
73 MGMT_OP_USER_CONFIRM_REPLY,
74 MGMT_OP_USER_CONFIRM_NEG_REPLY,
75 MGMT_OP_USER_PASSKEY_REPLY,
76 MGMT_OP_USER_PASSKEY_NEG_REPLY,
77 MGMT_OP_READ_LOCAL_OOB_DATA,
78 MGMT_OP_ADD_REMOTE_OOB_DATA,
79 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
80 MGMT_OP_START_DISCOVERY,
81 MGMT_OP_STOP_DISCOVERY,
82 MGMT_OP_CONFIRM_NAME,
83 MGMT_OP_BLOCK_DEVICE,
84 MGMT_OP_UNBLOCK_DEVICE,
85 MGMT_OP_SET_DEVICE_ID,
86 MGMT_OP_SET_ADVERTISING,
87 MGMT_OP_SET_BREDR,
88 MGMT_OP_SET_STATIC_ADDRESS,
89 MGMT_OP_SET_SCAN_PARAMS,
90 MGMT_OP_SET_SECURE_CONN,
91 MGMT_OP_SET_DEBUG_KEYS,
92 MGMT_OP_SET_PRIVACY,
93 MGMT_OP_LOAD_IRKS,
94 MGMT_OP_GET_CONN_INFO,
95 MGMT_OP_GET_CLOCK_INFO,
96 MGMT_OP_ADD_DEVICE,
97 MGMT_OP_REMOVE_DEVICE,
98 MGMT_OP_LOAD_CONN_PARAM,
99 MGMT_OP_READ_UNCONF_INDEX_LIST,
100 MGMT_OP_READ_CONFIG_INFO,
101 MGMT_OP_SET_EXTERNAL_CONFIG,
102 MGMT_OP_SET_PUBLIC_ADDRESS,
103 MGMT_OP_START_SERVICE_DISCOVERY,
104 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
105 MGMT_OP_READ_EXT_INDEX_LIST,
106 MGMT_OP_READ_ADV_FEATURES,
107 MGMT_OP_ADD_ADVERTISING,
108 MGMT_OP_REMOVE_ADVERTISING,
109 MGMT_OP_GET_ADV_SIZE_INFO,
110 MGMT_OP_START_LIMITED_DISCOVERY,
111 MGMT_OP_READ_EXT_INFO,
112 MGMT_OP_SET_APPEARANCE,
113 MGMT_OP_GET_PHY_CONFIGURATION,
114 MGMT_OP_SET_PHY_CONFIGURATION,
115 MGMT_OP_SET_BLOCKED_KEYS,
116 MGMT_OP_SET_WIDEBAND_SPEECH,
117 MGMT_OP_READ_CONTROLLER_CAP,
118 MGMT_OP_READ_EXP_FEATURES_INFO,
119 MGMT_OP_SET_EXP_FEATURE,
120 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
121 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
122 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
123 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
124 MGMT_OP_GET_DEVICE_FLAGS,
125 MGMT_OP_SET_DEVICE_FLAGS,
126 MGMT_OP_READ_ADV_MONITOR_FEATURES,
127 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
128 MGMT_OP_REMOVE_ADV_MONITOR,
129 MGMT_OP_ADD_EXT_ADV_PARAMS,
130 MGMT_OP_ADD_EXT_ADV_DATA,
131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
132 MGMT_OP_SET_MESH_RECEIVER,
133 MGMT_OP_MESH_READ_FEATURES,
134 MGMT_OP_MESH_SEND,
135 MGMT_OP_MESH_SEND_CANCEL,
136 };
137
138 static const u16 mgmt_events[] = {
139 MGMT_EV_CONTROLLER_ERROR,
140 MGMT_EV_INDEX_ADDED,
141 MGMT_EV_INDEX_REMOVED,
142 MGMT_EV_NEW_SETTINGS,
143 MGMT_EV_CLASS_OF_DEV_CHANGED,
144 MGMT_EV_LOCAL_NAME_CHANGED,
145 MGMT_EV_NEW_LINK_KEY,
146 MGMT_EV_NEW_LONG_TERM_KEY,
147 MGMT_EV_DEVICE_CONNECTED,
148 MGMT_EV_DEVICE_DISCONNECTED,
149 MGMT_EV_CONNECT_FAILED,
150 MGMT_EV_PIN_CODE_REQUEST,
151 MGMT_EV_USER_CONFIRM_REQUEST,
152 MGMT_EV_USER_PASSKEY_REQUEST,
153 MGMT_EV_AUTH_FAILED,
154 MGMT_EV_DEVICE_FOUND,
155 MGMT_EV_DISCOVERING,
156 MGMT_EV_DEVICE_BLOCKED,
157 MGMT_EV_DEVICE_UNBLOCKED,
158 MGMT_EV_DEVICE_UNPAIRED,
159 MGMT_EV_PASSKEY_NOTIFY,
160 MGMT_EV_NEW_IRK,
161 MGMT_EV_NEW_CSRK,
162 MGMT_EV_DEVICE_ADDED,
163 MGMT_EV_DEVICE_REMOVED,
164 MGMT_EV_NEW_CONN_PARAM,
165 MGMT_EV_UNCONF_INDEX_ADDED,
166 MGMT_EV_UNCONF_INDEX_REMOVED,
167 MGMT_EV_NEW_CONFIG_OPTIONS,
168 MGMT_EV_EXT_INDEX_ADDED,
169 MGMT_EV_EXT_INDEX_REMOVED,
170 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
171 MGMT_EV_ADVERTISING_ADDED,
172 MGMT_EV_ADVERTISING_REMOVED,
173 MGMT_EV_EXT_INFO_CHANGED,
174 MGMT_EV_PHY_CONFIGURATION_CHANGED,
175 MGMT_EV_EXP_FEATURE_CHANGED,
176 MGMT_EV_DEVICE_FLAGS_CHANGED,
177 MGMT_EV_ADV_MONITOR_ADDED,
178 MGMT_EV_ADV_MONITOR_REMOVED,
179 MGMT_EV_CONTROLLER_SUSPEND,
180 MGMT_EV_CONTROLLER_RESUME,
181 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
182 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
183 };
184
185 static const u16 mgmt_untrusted_commands[] = {
186 MGMT_OP_READ_INDEX_LIST,
187 MGMT_OP_READ_INFO,
188 MGMT_OP_READ_UNCONF_INDEX_LIST,
189 MGMT_OP_READ_CONFIG_INFO,
190 MGMT_OP_READ_EXT_INDEX_LIST,
191 MGMT_OP_READ_EXT_INFO,
192 MGMT_OP_READ_CONTROLLER_CAP,
193 MGMT_OP_READ_EXP_FEATURES_INFO,
194 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
195 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
196 };
197
198 static const u16 mgmt_untrusted_events[] = {
199 MGMT_EV_INDEX_ADDED,
200 MGMT_EV_INDEX_REMOVED,
201 MGMT_EV_NEW_SETTINGS,
202 MGMT_EV_CLASS_OF_DEV_CHANGED,
203 MGMT_EV_LOCAL_NAME_CHANGED,
204 MGMT_EV_UNCONF_INDEX_ADDED,
205 MGMT_EV_UNCONF_INDEX_REMOVED,
206 MGMT_EV_NEW_CONFIG_OPTIONS,
207 MGMT_EV_EXT_INDEX_ADDED,
208 MGMT_EV_EXT_INDEX_REMOVED,
209 MGMT_EV_EXT_INFO_CHANGED,
210 MGMT_EV_EXP_FEATURE_CHANGED,
211 };
212
213 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
214
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 "\x00\x00\x00\x00\x00\x00\x00\x00"
217
218 /* HCI to MGMT error code conversion table */
219 static const u8 mgmt_status_table[] = {
220 MGMT_STATUS_SUCCESS,
221 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
222 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
223 MGMT_STATUS_FAILED, /* Hardware Failure */
224 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
225 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
226 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
227 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
228 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
229 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
230 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
231 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
232 MGMT_STATUS_BUSY, /* Command Disallowed */
233 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
234 MGMT_STATUS_REJECTED, /* Rejected Security */
235 MGMT_STATUS_REJECTED, /* Rejected Personal */
236 MGMT_STATUS_TIMEOUT, /* Host Timeout */
237 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
238 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
239 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
240 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
241 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
242 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
243 MGMT_STATUS_BUSY, /* Repeated Attempts */
244 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
245 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
246 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
247 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
248 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
249 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
250 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
251 MGMT_STATUS_FAILED, /* Unspecified Error */
252 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
253 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
254 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
255 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
256 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
257 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
258 MGMT_STATUS_FAILED, /* Unit Link Key Used */
259 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
260 MGMT_STATUS_TIMEOUT, /* Instant Passed */
261 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
262 MGMT_STATUS_FAILED, /* Transaction Collision */
263 MGMT_STATUS_FAILED, /* Reserved for future use */
264 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
265 MGMT_STATUS_REJECTED, /* QoS Rejected */
266 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
267 MGMT_STATUS_REJECTED, /* Insufficient Security */
268 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
269 MGMT_STATUS_FAILED, /* Reserved for future use */
270 MGMT_STATUS_BUSY, /* Role Switch Pending */
271 MGMT_STATUS_FAILED, /* Reserved for future use */
272 MGMT_STATUS_FAILED, /* Slot Violation */
273 MGMT_STATUS_FAILED, /* Role Switch Failed */
274 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
275 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
276 MGMT_STATUS_BUSY, /* Host Busy Pairing */
277 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
278 MGMT_STATUS_BUSY, /* Controller Busy */
279 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
280 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
281 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
282 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
283 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
284 };
285
mgmt_errno_status(int err)286 static u8 mgmt_errno_status(int err)
287 {
288 switch (err) {
289 case 0:
290 return MGMT_STATUS_SUCCESS;
291 case -EPERM:
292 return MGMT_STATUS_REJECTED;
293 case -EINVAL:
294 return MGMT_STATUS_INVALID_PARAMS;
295 case -EOPNOTSUPP:
296 return MGMT_STATUS_NOT_SUPPORTED;
297 case -EBUSY:
298 return MGMT_STATUS_BUSY;
299 case -ETIMEDOUT:
300 return MGMT_STATUS_AUTH_FAILED;
301 case -ENOMEM:
302 return MGMT_STATUS_NO_RESOURCES;
303 case -EISCONN:
304 return MGMT_STATUS_ALREADY_CONNECTED;
305 case -ENOTCONN:
306 return MGMT_STATUS_DISCONNECTED;
307 }
308
309 return MGMT_STATUS_FAILED;
310 }
311
mgmt_status(int err)312 static u8 mgmt_status(int err)
313 {
314 if (err < 0)
315 return mgmt_errno_status(err);
316
317 if (err < ARRAY_SIZE(mgmt_status_table))
318 return mgmt_status_table[err];
319
320 return MGMT_STATUS_FAILED;
321 }
322
mgmt_index_event(u16 event,struct hci_dev * hdev,void * data,u16 len,int flag)323 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
324 u16 len, int flag)
325 {
326 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
327 flag, NULL);
328 }
329
mgmt_limited_event(u16 event,struct hci_dev * hdev,void * data,u16 len,int flag,struct sock * skip_sk)330 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
331 u16 len, int flag, struct sock *skip_sk)
332 {
333 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
334 flag, skip_sk);
335 }
336
mgmt_event(u16 event,struct hci_dev * hdev,void * data,u16 len,struct sock * skip_sk)337 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
338 struct sock *skip_sk)
339 {
340 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341 HCI_SOCK_TRUSTED, skip_sk);
342 }
343
mgmt_event_skb(struct sk_buff * skb,struct sock * skip_sk)344 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
345 {
346 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
347 skip_sk);
348 }
349
le_addr_type(u8 mgmt_addr_type)350 static u8 le_addr_type(u8 mgmt_addr_type)
351 {
352 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 return ADDR_LE_DEV_PUBLIC;
354 else
355 return ADDR_LE_DEV_RANDOM;
356 }
357
mgmt_fill_version_info(void * ver)358 void mgmt_fill_version_info(void *ver)
359 {
360 struct mgmt_rp_read_version *rp = ver;
361
362 rp->version = MGMT_VERSION;
363 rp->revision = cpu_to_le16(MGMT_REVISION);
364 }
365
read_version(struct sock * sk,struct hci_dev * hdev,void * data,u16 data_len)366 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
367 u16 data_len)
368 {
369 struct mgmt_rp_read_version rp;
370
371 bt_dev_dbg(hdev, "sock %p", sk);
372
373 mgmt_fill_version_info(&rp);
374
375 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
376 &rp, sizeof(rp));
377 }
378
read_commands(struct sock * sk,struct hci_dev * hdev,void * data,u16 data_len)379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
380 u16 data_len)
381 {
382 struct mgmt_rp_read_commands *rp;
383 u16 num_commands, num_events;
384 size_t rp_size;
385 int i, err;
386
387 bt_dev_dbg(hdev, "sock %p", sk);
388
389 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 num_commands = ARRAY_SIZE(mgmt_commands);
391 num_events = ARRAY_SIZE(mgmt_events);
392 } else {
393 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 num_events = ARRAY_SIZE(mgmt_untrusted_events);
395 }
396
397 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
398
399 rp = kmalloc(rp_size, GFP_KERNEL);
400 if (!rp)
401 return -ENOMEM;
402
403 rp->num_commands = cpu_to_le16(num_commands);
404 rp->num_events = cpu_to_le16(num_events);
405
406 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 __le16 *opcode = rp->opcodes;
408
409 for (i = 0; i < num_commands; i++, opcode++)
410 put_unaligned_le16(mgmt_commands[i], opcode);
411
412 for (i = 0; i < num_events; i++, opcode++)
413 put_unaligned_le16(mgmt_events[i], opcode);
414 } else {
415 __le16 *opcode = rp->opcodes;
416
417 for (i = 0; i < num_commands; i++, opcode++)
418 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
419
420 for (i = 0; i < num_events; i++, opcode++)
421 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
422 }
423
424 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
425 rp, rp_size);
426 kfree(rp);
427
428 return err;
429 }
430
read_index_list(struct sock * sk,struct hci_dev * hdev,void * data,u16 data_len)431 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
432 u16 data_len)
433 {
434 struct mgmt_rp_read_index_list *rp;
435 struct hci_dev *d;
436 size_t rp_len;
437 u16 count;
438 int err;
439
440 bt_dev_dbg(hdev, "sock %p", sk);
441
442 read_lock(&hci_dev_list_lock);
443
444 count = 0;
445 list_for_each_entry(d, &hci_dev_list, list) {
446 if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
447 count++;
448 }
449
450 rp_len = sizeof(*rp) + (2 * count);
451 rp = kmalloc(rp_len, GFP_ATOMIC);
452 if (!rp) {
453 read_unlock(&hci_dev_list_lock);
454 return -ENOMEM;
455 }
456
457 count = 0;
458 list_for_each_entry(d, &hci_dev_list, list) {
459 if (hci_dev_test_flag(d, HCI_SETUP) ||
460 hci_dev_test_flag(d, HCI_CONFIG) ||
461 hci_dev_test_flag(d, HCI_USER_CHANNEL))
462 continue;
463
464 /* Devices marked as raw-only are neither configured
465 * nor unconfigured controllers.
466 */
467 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
468 continue;
469
470 if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
471 rp->index[count++] = cpu_to_le16(d->id);
472 bt_dev_dbg(hdev, "Added hci%u", d->id);
473 }
474 }
475
476 rp->num_controllers = cpu_to_le16(count);
477 rp_len = sizeof(*rp) + (2 * count);
478
479 read_unlock(&hci_dev_list_lock);
480
481 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
482 0, rp, rp_len);
483
484 kfree(rp);
485
486 return err;
487 }
488
read_unconf_index_list(struct sock * sk,struct hci_dev * hdev,void * data,u16 data_len)489 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
490 void *data, u16 data_len)
491 {
492 struct mgmt_rp_read_unconf_index_list *rp;
493 struct hci_dev *d;
494 size_t rp_len;
495 u16 count;
496 int err;
497
498 bt_dev_dbg(hdev, "sock %p", sk);
499
500 read_lock(&hci_dev_list_lock);
501
502 count = 0;
503 list_for_each_entry(d, &hci_dev_list, list) {
504 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
505 count++;
506 }
507
508 rp_len = sizeof(*rp) + (2 * count);
509 rp = kmalloc(rp_len, GFP_ATOMIC);
510 if (!rp) {
511 read_unlock(&hci_dev_list_lock);
512 return -ENOMEM;
513 }
514
515 count = 0;
516 list_for_each_entry(d, &hci_dev_list, list) {
517 if (hci_dev_test_flag(d, HCI_SETUP) ||
518 hci_dev_test_flag(d, HCI_CONFIG) ||
519 hci_dev_test_flag(d, HCI_USER_CHANNEL))
520 continue;
521
522 /* Devices marked as raw-only are neither configured
523 * nor unconfigured controllers.
524 */
525 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
526 continue;
527
528 if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
529 rp->index[count++] = cpu_to_le16(d->id);
530 bt_dev_dbg(hdev, "Added hci%u", d->id);
531 }
532 }
533
534 rp->num_controllers = cpu_to_le16(count);
535 rp_len = sizeof(*rp) + (2 * count);
536
537 read_unlock(&hci_dev_list_lock);
538
539 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
540 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
541
542 kfree(rp);
543
544 return err;
545 }
546
read_ext_index_list(struct sock * sk,struct hci_dev * hdev,void * data,u16 data_len)547 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
548 void *data, u16 data_len)
549 {
550 struct mgmt_rp_read_ext_index_list *rp;
551 struct hci_dev *d;
552 u16 count;
553 int err;
554
555 bt_dev_dbg(hdev, "sock %p", sk);
556
557 read_lock(&hci_dev_list_lock);
558
559 count = 0;
560 list_for_each_entry(d, &hci_dev_list, list)
561 count++;
562
563 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
564 if (!rp) {
565 read_unlock(&hci_dev_list_lock);
566 return -ENOMEM;
567 }
568
569 count = 0;
570 list_for_each_entry(d, &hci_dev_list, list) {
571 if (hci_dev_test_flag(d, HCI_SETUP) ||
572 hci_dev_test_flag(d, HCI_CONFIG) ||
573 hci_dev_test_flag(d, HCI_USER_CHANNEL))
574 continue;
575
576 /* Devices marked as raw-only are neither configured
577 * nor unconfigured controllers.
578 */
579 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
580 continue;
581
582 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
583 rp->entry[count].type = 0x01;
584 else
585 rp->entry[count].type = 0x00;
586
587 rp->entry[count].bus = d->bus;
588 rp->entry[count++].index = cpu_to_le16(d->id);
589 bt_dev_dbg(hdev, "Added hci%u", d->id);
590 }
591
592 rp->num_controllers = cpu_to_le16(count);
593
594 read_unlock(&hci_dev_list_lock);
595
596 /* If this command is called at least once, then all the
597 * default index and unconfigured index events are disabled
598 * and from now on only extended index events are used.
599 */
600 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
601 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
602 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
603
604 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
605 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
606 struct_size(rp, entry, count));
607
608 kfree(rp);
609
610 return err;
611 }
612
is_configured(struct hci_dev * hdev)613 static bool is_configured(struct hci_dev *hdev)
614 {
615 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
616 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 return false;
618
619 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
620 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
621 !bacmp(&hdev->public_addr, BDADDR_ANY))
622 return false;
623
624 return true;
625 }
626
get_missing_options(struct hci_dev * hdev)627 static __le32 get_missing_options(struct hci_dev *hdev)
628 {
629 u32 options = 0;
630
631 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
632 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
633 options |= MGMT_OPTION_EXTERNAL_CONFIG;
634
635 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
636 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
637 !bacmp(&hdev->public_addr, BDADDR_ANY))
638 options |= MGMT_OPTION_PUBLIC_ADDRESS;
639
640 return cpu_to_le32(options);
641 }
642
new_options(struct hci_dev * hdev,struct sock * skip)643 static int new_options(struct hci_dev *hdev, struct sock *skip)
644 {
645 __le32 options = get_missing_options(hdev);
646
647 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
648 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
649 }
650
send_options_rsp(struct sock * sk,u16 opcode,struct hci_dev * hdev)651 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
652 {
653 __le32 options = get_missing_options(hdev);
654
655 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
656 sizeof(options));
657 }
658
read_config_info(struct sock * sk,struct hci_dev * hdev,void * data,u16 data_len)659 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
660 void *data, u16 data_len)
661 {
662 struct mgmt_rp_read_config_info rp;
663 u32 options = 0;
664
665 bt_dev_dbg(hdev, "sock %p", sk);
666
667 hci_dev_lock(hdev);
668
669 memset(&rp, 0, sizeof(rp));
670 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
671
672 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
673 options |= MGMT_OPTION_EXTERNAL_CONFIG;
674
675 if (hdev->set_bdaddr)
676 options |= MGMT_OPTION_PUBLIC_ADDRESS;
677
678 rp.supported_options = cpu_to_le32(options);
679 rp.missing_options = get_missing_options(hdev);
680
681 hci_dev_unlock(hdev);
682
683 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
684 &rp, sizeof(rp));
685 }
686
get_supported_phys(struct hci_dev * hdev)687 static u32 get_supported_phys(struct hci_dev *hdev)
688 {
689 u32 supported_phys = 0;
690
691 if (lmp_bredr_capable(hdev)) {
692 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
693
694 if (hdev->features[0][0] & LMP_3SLOT)
695 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
696
697 if (hdev->features[0][0] & LMP_5SLOT)
698 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
699
700 if (lmp_edr_2m_capable(hdev)) {
701 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
702
703 if (lmp_edr_3slot_capable(hdev))
704 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
705
706 if (lmp_edr_5slot_capable(hdev))
707 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
708
709 if (lmp_edr_3m_capable(hdev)) {
710 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
711
712 if (lmp_edr_3slot_capable(hdev))
713 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
714
715 if (lmp_edr_5slot_capable(hdev))
716 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
717 }
718 }
719 }
720
721 if (lmp_le_capable(hdev)) {
722 supported_phys |= MGMT_PHY_LE_1M_TX;
723 supported_phys |= MGMT_PHY_LE_1M_RX;
724
725 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
726 supported_phys |= MGMT_PHY_LE_2M_TX;
727 supported_phys |= MGMT_PHY_LE_2M_RX;
728 }
729
730 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
731 supported_phys |= MGMT_PHY_LE_CODED_TX;
732 supported_phys |= MGMT_PHY_LE_CODED_RX;
733 }
734 }
735
736 return supported_phys;
737 }
738
get_selected_phys(struct hci_dev * hdev)739 static u32 get_selected_phys(struct hci_dev *hdev)
740 {
741 u32 selected_phys = 0;
742
743 if (lmp_bredr_capable(hdev)) {
744 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
745
746 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
747 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
748
749 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
750 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
751
752 if (lmp_edr_2m_capable(hdev)) {
753 if (!(hdev->pkt_type & HCI_2DH1))
754 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
755
756 if (lmp_edr_3slot_capable(hdev) &&
757 !(hdev->pkt_type & HCI_2DH3))
758 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
759
760 if (lmp_edr_5slot_capable(hdev) &&
761 !(hdev->pkt_type & HCI_2DH5))
762 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
763
764 if (lmp_edr_3m_capable(hdev)) {
765 if (!(hdev->pkt_type & HCI_3DH1))
766 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
767
768 if (lmp_edr_3slot_capable(hdev) &&
769 !(hdev->pkt_type & HCI_3DH3))
770 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
771
772 if (lmp_edr_5slot_capable(hdev) &&
773 !(hdev->pkt_type & HCI_3DH5))
774 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
775 }
776 }
777 }
778
779 if (lmp_le_capable(hdev)) {
780 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
781 selected_phys |= MGMT_PHY_LE_1M_TX;
782
783 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
784 selected_phys |= MGMT_PHY_LE_1M_RX;
785
786 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
787 selected_phys |= MGMT_PHY_LE_2M_TX;
788
789 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
790 selected_phys |= MGMT_PHY_LE_2M_RX;
791
792 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
793 selected_phys |= MGMT_PHY_LE_CODED_TX;
794
795 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
796 selected_phys |= MGMT_PHY_LE_CODED_RX;
797 }
798
799 return selected_phys;
800 }
801
get_configurable_phys(struct hci_dev * hdev)802 static u32 get_configurable_phys(struct hci_dev *hdev)
803 {
804 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
805 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
806 }
807
get_supported_settings(struct hci_dev * hdev)808 static u32 get_supported_settings(struct hci_dev *hdev)
809 {
810 u32 settings = 0;
811
812 settings |= MGMT_SETTING_POWERED;
813 settings |= MGMT_SETTING_BONDABLE;
814 settings |= MGMT_SETTING_DEBUG_KEYS;
815 settings |= MGMT_SETTING_CONNECTABLE;
816 settings |= MGMT_SETTING_DISCOVERABLE;
817
818 if (lmp_bredr_capable(hdev)) {
819 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
820 settings |= MGMT_SETTING_FAST_CONNECTABLE;
821 settings |= MGMT_SETTING_BREDR;
822 settings |= MGMT_SETTING_LINK_SECURITY;
823
824 if (lmp_ssp_capable(hdev)) {
825 settings |= MGMT_SETTING_SSP;
826 }
827
828 if (lmp_sc_capable(hdev))
829 settings |= MGMT_SETTING_SECURE_CONN;
830
831 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
832 &hdev->quirks))
833 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
834 }
835
836 if (lmp_le_capable(hdev)) {
837 settings |= MGMT_SETTING_LE;
838 settings |= MGMT_SETTING_SECURE_CONN;
839 settings |= MGMT_SETTING_PRIVACY;
840 settings |= MGMT_SETTING_STATIC_ADDRESS;
841 settings |= MGMT_SETTING_ADVERTISING;
842 }
843
844 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
845 hdev->set_bdaddr)
846 settings |= MGMT_SETTING_CONFIGURATION;
847
848 if (cis_central_capable(hdev))
849 settings |= MGMT_SETTING_CIS_CENTRAL;
850
851 if (cis_peripheral_capable(hdev))
852 settings |= MGMT_SETTING_CIS_PERIPHERAL;
853
854 settings |= MGMT_SETTING_PHY_CONFIGURATION;
855
856 return settings;
857 }
858
get_current_settings(struct hci_dev * hdev)859 static u32 get_current_settings(struct hci_dev *hdev)
860 {
861 u32 settings = 0;
862
863 if (hdev_is_powered(hdev))
864 settings |= MGMT_SETTING_POWERED;
865
866 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
867 settings |= MGMT_SETTING_CONNECTABLE;
868
869 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
870 settings |= MGMT_SETTING_FAST_CONNECTABLE;
871
872 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
873 settings |= MGMT_SETTING_DISCOVERABLE;
874
875 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
876 settings |= MGMT_SETTING_BONDABLE;
877
878 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
879 settings |= MGMT_SETTING_BREDR;
880
881 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
882 settings |= MGMT_SETTING_LE;
883
884 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
885 settings |= MGMT_SETTING_LINK_SECURITY;
886
887 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
888 settings |= MGMT_SETTING_SSP;
889
890 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
891 settings |= MGMT_SETTING_ADVERTISING;
892
893 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
894 settings |= MGMT_SETTING_SECURE_CONN;
895
896 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
897 settings |= MGMT_SETTING_DEBUG_KEYS;
898
899 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
900 settings |= MGMT_SETTING_PRIVACY;
901
902 /* The current setting for static address has two purposes. The
903 * first is to indicate if the static address will be used and
904 * the second is to indicate if it is actually set.
905 *
906 * This means if the static address is not configured, this flag
907 * will never be set. If the address is configured, then if the
908 * address is actually used decides if the flag is set or not.
909 *
910 * For single mode LE only controllers and dual-mode controllers
911 * with BR/EDR disabled, the existence of the static address will
912 * be evaluated.
913 */
914 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
915 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
916 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
917 if (bacmp(&hdev->static_addr, BDADDR_ANY))
918 settings |= MGMT_SETTING_STATIC_ADDRESS;
919 }
920
921 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
922 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
923
924 if (cis_central_capable(hdev))
925 settings |= MGMT_SETTING_CIS_CENTRAL;
926
927 if (cis_peripheral_capable(hdev))
928 settings |= MGMT_SETTING_CIS_PERIPHERAL;
929
930 if (bis_capable(hdev))
931 settings |= MGMT_SETTING_ISO_BROADCASTER;
932
933 if (sync_recv_capable(hdev))
934 settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
935
936 return settings;
937 }
938
pending_find(u16 opcode,struct hci_dev * hdev)939 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
940 {
941 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
942 }
943
mgmt_get_adv_discov_flags(struct hci_dev * hdev)944 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
945 {
946 struct mgmt_pending_cmd *cmd;
947
948 /* If there's a pending mgmt command the flags will not yet have
949 * their final values, so check for this first.
950 */
951 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
952 if (cmd) {
953 struct mgmt_mode *cp = cmd->param;
954 if (cp->val == 0x01)
955 return LE_AD_GENERAL;
956 else if (cp->val == 0x02)
957 return LE_AD_LIMITED;
958 } else {
959 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
960 return LE_AD_LIMITED;
961 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
962 return LE_AD_GENERAL;
963 }
964
965 return 0;
966 }
967
mgmt_get_connectable(struct hci_dev * hdev)968 bool mgmt_get_connectable(struct hci_dev *hdev)
969 {
970 struct mgmt_pending_cmd *cmd;
971
972 /* If there's a pending mgmt command the flag will not yet have
973 * it's final value, so check for this first.
974 */
975 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
976 if (cmd) {
977 struct mgmt_mode *cp = cmd->param;
978
979 return cp->val;
980 }
981
982 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
983 }
984
service_cache_sync(struct hci_dev * hdev,void * data)985 static int service_cache_sync(struct hci_dev *hdev, void *data)
986 {
987 hci_update_eir_sync(hdev);
988 hci_update_class_sync(hdev);
989
990 return 0;
991 }
992
service_cache_off(struct work_struct * work)993 static void service_cache_off(struct work_struct *work)
994 {
995 struct hci_dev *hdev = container_of(work, struct hci_dev,
996 service_cache.work);
997
998 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
999 return;
1000
1001 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1002 }
1003
rpa_expired_sync(struct hci_dev * hdev,void * data)1004 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1005 {
1006 /* The generation of a new RPA and programming it into the
1007 * controller happens in the hci_req_enable_advertising()
1008 * function.
1009 */
1010 if (ext_adv_capable(hdev))
1011 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1012 else
1013 return hci_enable_advertising_sync(hdev);
1014 }
1015
rpa_expired(struct work_struct * work)1016 static void rpa_expired(struct work_struct *work)
1017 {
1018 struct hci_dev *hdev = container_of(work, struct hci_dev,
1019 rpa_expired.work);
1020
1021 bt_dev_dbg(hdev, "");
1022
1023 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1024
1025 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1026 return;
1027
1028 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1029 }
1030
1031 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1032
/* Delayed-work handler for the discoverable timeout: clear both
 * discoverable flags, push the new state to the controller and
 * broadcast the settings change.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	/* Program the controller with the now-cleared discoverable state */
	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	/* Notify all mgmt sockets about the new settings */
	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1057
1058 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1059
/* Finish a mesh transmit: optionally emit the Mesh Packet Complete
 * event (suppressed when @silent is true) and free the pending TX
 * entry.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	/* Copy the handle first: mgmt_mesh_remove() releases mesh_tx */
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1071
mesh_send_done_sync(struct hci_dev * hdev,void * data)1072 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1073 {
1074 struct mgmt_mesh_tx *mesh_tx;
1075
1076 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1077 hci_disable_advertising_sync(hdev);
1078 mesh_tx = mgmt_mesh_next(hdev, NULL);
1079
1080 if (mesh_tx)
1081 mesh_send_complete(hdev, mesh_tx, false);
1082
1083 return 0;
1084 }
1085
1086 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1087 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
mesh_next(struct hci_dev * hdev,void * data,int err)1088 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1089 {
1090 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1091
1092 if (!mesh_tx)
1093 return;
1094
1095 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1096 mesh_send_start_complete);
1097
1098 if (err < 0)
1099 mesh_send_complete(hdev, mesh_tx, false);
1100 else
1101 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1102 }
1103
mesh_send_done(struct work_struct * work)1104 static void mesh_send_done(struct work_struct *work)
1105 {
1106 struct hci_dev *hdev = container_of(work, struct hci_dev,
1107 mesh_send_done.work);
1108
1109 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1110 return;
1111
1112 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1113 }
1114
/* One-time per-controller mgmt initialization, performed the first
 * time a mgmt socket touches this hdev: set up the delayed work items
 * and switch the device into mgmt-controlled mode.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* Already initialized for mgmt - nothing to do */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1136
/* MGMT_OP_READ_INFO handler: fill a mgmt_rp_read_info reply with the
 * controller's address, version, settings, class and names, and send
 * it back on @sk.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Lock while sampling hdev state so the reply is self-consistent */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1166
append_eir_data_to_buf(struct hci_dev * hdev,u8 * eir)1167 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1168 {
1169 u16 eir_len = 0;
1170 size_t name_len;
1171
1172 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1173 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1174 hdev->dev_class, 3);
1175
1176 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1177 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1178 hdev->appearance);
1179
1180 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1181 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1182 hdev->dev_name, name_len);
1183
1184 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1185 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1186 hdev->short_name, name_len);
1187
1188 return eir_len;
1189 }
1190
/* MGMT_OP_READ_EXT_INFO handler: like READ_INFO but with the class,
 * appearance and names encoded as EIR data appended to the reply.
 * Also switches this socket over to the extended-info event model.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	/* Reply is the fixed header plus however much EIR was appended */
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1230
/* Emit MGMT_EV_EXT_INFO_CHANGED (with freshly built EIR payload) to
 * all sockets that opted into extended-info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1246
/* Reply to @opcode on @sk with the controller's current settings
 * bitmask (the standard success response for the various SET_*
 * commands).
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1254
/* Broadcast MGMT_EV_ADVERTISING_ADDED for @instance to every mgmt
 * socket except @sk (the originator already got a command reply).
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1263
/* Broadcast MGMT_EV_ADVERTISING_REMOVED for @instance to every mgmt
 * socket except @sk (the originator already got a command reply).
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1273
cancel_adv_timeout(struct hci_dev * hdev)1274 static void cancel_adv_timeout(struct hci_dev *hdev)
1275 {
1276 if (hdev->adv_instance_timeout) {
1277 hdev->adv_instance_timeout = 0;
1278 cancel_delayed_work(&hdev->adv_instance_expire);
1279 }
1280 }
1281
1282 /* This function requires the caller holds hdev->lock */
restart_le_actions(struct hci_dev * hdev)1283 static void restart_le_actions(struct hci_dev *hdev)
1284 {
1285 struct hci_conn_params *p;
1286
1287 list_for_each_entry(p, &hdev->le_conn_params, list) {
1288 /* Needed for AUTO_OFF case where might not "really"
1289 * have been powered off.
1290 */
1291 hci_pend_le_list_del_init(p);
1292
1293 switch (p->auto_connect) {
1294 case HCI_AUTO_CONN_DIRECT:
1295 case HCI_AUTO_CONN_ALWAYS:
1296 hci_pend_le_list_add(p, &hdev->pend_le_conns);
1297 break;
1298 case HCI_AUTO_CONN_REPORT:
1299 hci_pend_le_list_add(p, &hdev->pend_le_reports);
1300 break;
1301 default:
1302 break;
1303 }
1304 }
1305 }
1306
/* Broadcast MGMT_EV_NEW_SETTINGS (current settings bitmask) to all
 * sockets that subscribed to setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1314
/* Completion callback for MGMT_OP_SET_POWERED: reply to the command
 * socket and, on a successful power-on, restore LE auto-connect state
 * and broadcast the new settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* Power-on: re-arm LE auto connections/reports */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1350
/* hci_cmd_sync callback for MGMT_OP_SET_POWERED: apply the requested
 * powered state (cp->val: 0x00 off, 0x01 on) to the controller.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1360
/* MGMT_OP_SET_POWERED handler: validate the request, register it as
 * pending and queue the asynchronous power state change.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state - just confirm the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1411
/* Broadcast the current settings to all subscribed mgmt sockets
 * (public wrapper around new_settings() with no socket skipped).
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1416
/* Shared context for mgmt_pending_foreach() callbacks (settings_rsp,
 * cmd_status_rsp and friends).
 */
struct cmd_lookup {
	struct sock *sk;	/* first socket replied to; NULL until set */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1422
/* mgmt_pending_foreach() callback: answer each pending command with
 * the current settings, and record the first socket seen in @match so
 * the caller can skip it when broadcasting new_settings().
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* Reference is dropped by the caller via sock_put() */
		sock_hold(match->sk);
	}

	/* cmd was unlinked above, so free (not remove) it */
	mgmt_pending_free(cmd);
}
1438
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status pointed to by @data and release it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1446
cmd_complete_rsp(struct mgmt_pending_cmd * cmd,void * data)1447 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1448 {
1449 if (cmd->cmd_complete) {
1450 u8 *status = data;
1451
1452 cmd->cmd_complete(cmd, *status);
1453 mgmt_pending_remove(cmd);
1454
1455 return;
1456 }
1457
1458 cmd_status_rsp(cmd, data);
1459 }
1460
/* Generic cmd_complete handler: echo the original command parameters
 * back as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1466
/* cmd_complete handler for address-based commands: reply with only
 * the leading mgmt_addr_info portion of the original parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1472
mgmt_bredr_support(struct hci_dev * hdev)1473 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1474 {
1475 if (!lmp_bredr_capable(hdev))
1476 return MGMT_STATUS_NOT_SUPPORTED;
1477 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1478 return MGMT_STATUS_REJECTED;
1479 else
1480 return MGMT_STATUS_SUCCESS;
1481 }
1482
mgmt_le_support(struct hci_dev * hdev)1483 static u8 mgmt_le_support(struct hci_dev *hdev)
1484 {
1485 if (!lmp_le_capable(hdev))
1486 return MGMT_STATUS_NOT_SUPPORTED;
1487 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1488 return MGMT_STATUS_REJECTED;
1489 else
1490 return MGMT_STATUS_SUCCESS;
1491 }
1492
/* Completion callback for MGMT_OP_SET_DISCOVERABLE: on success arm
 * the discoverable timeout (if requested), reply with the settings
 * and broadcast the change; on error report the failure.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the optimistic limited-discoverable flag */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the discoverable timeout now that the mode is active */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1526
/* hci_cmd_sync callback for MGMT_OP_SET_DISCOVERABLE: push the
 * discoverable state (already recorded in the hdev flags) to the
 * controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1533
/* MGMT_OP_SET_DISCOVERABLE handler.
 *
 * cp->val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable; cp->timeout is the discoverable timeout in seconds
 * (0 = no timeout). Updates the flags and queues the controller
 * update; the timeout itself is armed in the completion handler.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* At least one transport must be enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while the controller is powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes must not race each other */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powered off: only toggle the flag, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1666
/* Completion callback for MGMT_OP_SET_CONNECTABLE: reply to the
 * command socket and broadcast the new settings, or report the error.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	/* cmd can only be NULL here if both data and the pending lookup
	 * were NULL at the check above
	 */
	if (cmd)
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
1695
/* Apply a connectable change while the controller is powered off:
 * only the flags are touched (clearing connectable also clears
 * discoverable), the requester gets a settings reply, and scan state
 * plus a settings broadcast are refreshed if anything changed.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE));
	int err;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (!changed)
		return 0;

	hci_update_scan(hdev);
	hci_update_passive_scan(hdev);

	return new_settings(hdev, sk);
}
1724
/* hci_cmd_sync callback for MGMT_OP_SET_CONNECTABLE: push the
 * connectable state (already recorded in the hdev flags) to the
 * controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1731
/* MGMT_OP_SET_CONNECTABLE handler: validate, update the flags
 * (disabling connectable also drops discoverable and any pending
 * discoverable timeout) and queue the controller update.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* At least one transport must be enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Discoverable and connectable changes must not race each other */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also ends discoverable mode */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1791
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag. This is
 * a host-side setting, so no HCI command is needed; only the
 * advertising address may need a refresh in limited privacy mode.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* changed is true only if the flag actually flipped */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1829
/* MGMT_OP_SET_LINK_SECURITY handler: enable/disable BR/EDR link level
 * security. When powered, this sends HCI_OP_WRITE_AUTH_ENABLE and the
 * reply is generated from the command-complete path elsewhere; when
 * powered off only the flag is toggled.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Link security is a BR/EDR-only feature */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1898
/* Completion callback for MGMT_OP_SET_SSP: on error undo a partially
 * applied enable and fail all pending SET_SSP commands; on success
 * perform the canonical flag transition, answer all pending commands
 * and broadcast the settings change.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* set_ssp_sync() left the flag set on failure so the
		 * rollback can be detected and announced here
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* SSP state is advertised in the EIR data */
	hci_update_eir_sync(hdev);
}
1940
/* hci_cmd_sync callback for MGMT_OP_SET_SSP: write the requested SSP
 * mode to the controller.
 *
 * When enabling, HCI_SSP_ENABLED is set temporarily (presumably so the
 * write path observes the new mode — NOTE(review): confirm against
 * hci_write_ssp_mode_sync) and reverted on success, leaving the real
 * flag transition and new_settings emission to set_ssp_complete(). On
 * failure the flag stays set so the error path in set_ssp_complete()
 * can detect it, clear it and notify.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1958
/* MGMT_OP_SET_SSP handler: enable/disable Secure Simple Pairing.
 * Requires a BR/EDR capable and enabled, SSP capable controller; when
 * powered off only the flag is toggled.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state - just confirm the settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2033
/* MGMT_OP_SET_HS handler: High Speed (AMP) support has been removed,
 * so the command is always rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	bt_dev_dbg(hdev, "sock %p", sk);

	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
			       MGMT_STATUS_NOT_SUPPORTED);
}
2041
/* Completion callback for MGMT_OP_SET_LE: fail or answer all pending
 * SET_LE commands and broadcast the resulting settings.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first socket it saw */
	if (match.sk)
		sock_put(match.sk);
}
2062
/* hci_cmd_sync callback for MGMT_OP_SET_LE: when disabling, tear down
 * all advertising first; when enabling, set the flag, write LE host
 * support and restore sane advertising data and passive scanning.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Disabling LE: remove all advertising instances first */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2106
/* Completion callback for MGMT_OP_SET_MESH_RECEIVER queued via
 * hci_cmd_sync_queue(). On error, every pending SET_MESH_RECEIVER
 * command is answered (and removed) through cmd_status_rsp.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Success: sk is saved above because mgmt_pending_remove() frees
	 * cmd before the reply is sent.
	 */
	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2122
/* Runs on the cmd_sync queue: enable/disable mesh receiving and install
 * the AD type filter list carried after the fixed command header.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	/* Remaining bytes are the variable-length AD type filter list.
	 * NOTE(review): assumes param_len >= sizeof(*cp) — presumably the
	 * mgmt core's minimum-size check guarantees this; verify, since an
	 * underflow here would make len huge and skip the copy below.
	 */
	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2145
/* MGMT_OP_SET_MESH_RECEIVER handler: validate the request and queue
 * set_mesh_sync() on the cmd_sync work queue.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* enable must be a strict boolean (0x00 or 0x01) */
	if (cp->enable > 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (cmd)
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);
	else
		err = -ENOMEM;

	if (err < 0) {
		if (cmd)
			mgmt_pending_remove(cmd);

		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);
	}

	hci_dev_unlock(hdev);
	return err;
}
2183
mesh_send_start_complete(struct hci_dev * hdev,void * data,int err)2184 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2185 {
2186 struct mgmt_mesh_tx *mesh_tx = data;
2187 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2188 unsigned long mesh_send_interval;
2189 u8 mgmt_err = mgmt_status(err);
2190
2191 /* Report any errors here, but don't report completion */
2192
2193 if (mgmt_err) {
2194 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2195 /* Send Complete Error Code for handle */
2196 mesh_send_complete(hdev, mesh_tx, false);
2197 return;
2198 }
2199
2200 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2201 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2202 mesh_send_interval);
2203 }
2204
/* Runs on the cmd_sync queue: start transmitting one queued mesh packet
 * by installing its payload as a dedicated advertising instance and
 * scheduling it if nothing else is currently advertising.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	/* Use an instance number one past the controller's advertising
	 * sets so mesh TX never collides with regular instances.
	 */
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	/* instance != 0 means we picked a specific instance to schedule;
	 * instance == 0 defers to the normal advertising rotation.
	 */
	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2258
send_count(struct mgmt_mesh_tx * mesh_tx,void * data)2259 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2260 {
2261 struct mgmt_rp_mesh_read_features *rp = data;
2262
2263 if (rp->used_handles >= rp->max_handles)
2264 return;
2265
2266 rp->handles[rp->used_handles++] = mesh_tx->handle;
2267 }
2268
/* MGMT_OP_MESH_READ_FEATURES handler: report the mesh handle capacity
 * and the handles currently in use by the requesting socket.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* max_handles stays 0 (no capacity) while LE is disabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply length to only the handles actually in use:
	 * full struct minus the unused tail of the handles array.
	 */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2295
send_cancel(struct hci_dev * hdev,void * data)2296 static int send_cancel(struct hci_dev *hdev, void *data)
2297 {
2298 struct mgmt_pending_cmd *cmd = data;
2299 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2300 struct mgmt_mesh_tx *mesh_tx;
2301
2302 if (!cancel->handle) {
2303 do {
2304 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2305
2306 if (mesh_tx)
2307 mesh_send_complete(hdev, mesh_tx, false);
2308 } while (mesh_tx);
2309 } else {
2310 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2311
2312 if (mesh_tx && mesh_tx->sk == cmd->sk)
2313 mesh_send_complete(hdev, mesh_tx, false);
2314 }
2315
2316 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2317 0, NULL, 0);
2318 mgmt_pending_free(cmd);
2319
2320 return 0;
2321 }
2322
/* MGMT_OP_MESH_SEND_CANCEL handler: validate preconditions and queue
 * send_cancel() on the cmd_sync work queue.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (cmd)
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
	else
		err = -ENOMEM;

	if (err < 0) {
		if (cmd)
			mgmt_pending_free(cmd);

		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);
	}

	hci_dev_unlock(hdev);
	return err;
}
2356
/* MGMT_OP_MESH_SEND handler: queue a mesh packet for transmission.
 * If no transmission is in flight, kick off mesh_send_sync(); otherwise
 * the packet just waits in the mesh TX queue.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* Payload must be 1..31 bytes of adv data past the fixed header */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Count this socket's outstanding handles to enforce the limit */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		/* Nothing in flight: start transmitting immediately */
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		/* If a send was already in progress the queued entry is
		 * removed here; otherwise mesh_send_start_complete owns
		 * the failure cleanup.
		 */
		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		/* Reply with the 1-byte handle assigned to this packet */
		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2417
/* MGMT_OP_SET_LE handler: enable or disable LE support. When no HCI
 * traffic is needed (powered off, or no effective change) the settings
 * are updated directly; otherwise set_le_sync() is queued.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No controller interaction needed: just flip the flags and reply */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE implicitly disables LE advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one LE-affecting settings change may be in flight at once */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2506
2507 /* This is a helper function to test for pending mgmt commands that can
2508 * cause CoD or EIR HCI commands. We can only allow one such pending
2509 * mgmt command at a time since otherwise we cannot easily track what
2510 * the current values are, will be, and based on that calculate if a new
2511 * HCI command needs to be sent and if yes with what value.
2512 */
pending_eir_or_class(struct hci_dev * hdev)2513 static bool pending_eir_or_class(struct hci_dev *hdev)
2514 {
2515 struct mgmt_pending_cmd *cmd;
2516
2517 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2518 switch (cmd->opcode) {
2519 case MGMT_OP_ADD_UUID:
2520 case MGMT_OP_REMOVE_UUID:
2521 case MGMT_OP_SET_DEV_CLASS:
2522 case MGMT_OP_SET_POWERED:
2523 return true;
2524 }
2525 }
2526
2527 return false;
2528 }
2529
/* 128-bit Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB)
 * stored little-endian; 16/32-bit short UUIDs are aliases of this base
 * with only the last four bytes (indexes 12-15) differing.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2534
get_uuid_size(const u8 * uuid)2535 static u8 get_uuid_size(const u8 *uuid)
2536 {
2537 u32 val;
2538
2539 if (memcmp(uuid, bluetooth_base_uuid, 12))
2540 return 128;
2541
2542 val = get_unaligned_le32(&uuid[12]);
2543 if (val > 0xffff)
2544 return 32;
2545
2546 return 16;
2547 }
2548
/* Shared completion callback for the UUID/class commands: reply with the
 * current 3-byte device class and free the pending command entry.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2560
/* Runs on the cmd_sync queue: push the updated class of device to the
 * controller, then refresh the EIR data to include the new UUID.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	/* Only touch the EIR when the class update succeeded */
	return err ? err : hci_update_eir_sync(hdev);
}
2571
/* MGMT_OP_ADD_UUID handler: append the UUID to hdev->uuids and queue the
 * class/EIR update. The reply is sent from mgmt_class_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one class/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	/* The UUID stays on the list even if queueing below fails; it is
	 * cleaned up via hci_uuids_clear() / MGMT_OP_REMOVE_UUID.
	 */
	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	/* Success also falls through to the unlock below */
failed:
	hci_dev_unlock(hdev);
	return err;
}
2621
enable_service_cache(struct hci_dev * hdev)2622 static bool enable_service_cache(struct hci_dev *hdev)
2623 {
2624 if (!hdev_is_powered(hdev))
2625 return false;
2626
2627 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2628 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2629 CACHE_TIMEOUT);
2630 return true;
2631 }
2632
2633 return false;
2634 }
2635
/* Runs on the cmd_sync queue: push the updated class of device to the
 * controller, then refresh the EIR data after a UUID removal.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	/* Only touch the EIR when the class update succeeded */
	return err ? err : hci_update_eir_sync(hdev);
}
2646
/* MGMT_OP_REMOVE_UUID handler: remove one UUID (or all, when the all-zero
 * wildcard is given) and queue the class/EIR update.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one class/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache just got (re)armed, the controller
		 * update is deferred; reply immediately instead.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every list entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2717
set_class_sync(struct hci_dev * hdev,void * data)2718 static int set_class_sync(struct hci_dev *hdev, void *data)
2719 {
2720 int err = 0;
2721
2722 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2723 cancel_delayed_work_sync(&hdev->service_cache);
2724 err = hci_update_eir_sync(hdev);
2725 }
2726
2727 if (err)
2728 return err;
2729
2730 return hci_update_class_sync(hdev);
2731 }
2732
/* MGMT_OP_SET_DEV_CLASS handler: validate and store the major/minor
 * class, then queue the controller update if the adapter is powered.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one class/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The two low bits of minor and three high bits of major are
	 * reserved in the Class of Device format and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: the value takes effect on power on; reply now */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2787
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied list. All keys are validated before any state is
 * modified, so a bad request leaves the existing keys untouched.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps the total parameter size within u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared key count must exactly match the payload length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every key type up front, before clearing anything */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
		if (key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Keys on the administrative block list are never loaded */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2877
/* Emit the MGMT_EV_DEVICE_UNPAIRED event for the given address to every
 * mgmt socket except skip_sk.
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev = {
		.addr.type = addr_type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2889
/* Completion callback for unpair_device_sync(): broadcast the unpaired
 * event on success, then run the command's reply path and free it.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	/* cmd_complete was set to addr_cmd_complete by unpair_device() */
	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
2901
/* Runs on the cmd_sync queue: terminate the live connection (if any) to
 * the device being unpaired.
 */
static int unpair_device_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct hci_conn *conn;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* No live connection: nothing left to tear down */
	if (!conn)
		return 0;

	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
}
2920
/* MGMT_OP_UNPAIR_DEVICE handler: remove the stored keys for a device
 * (link key for BR/EDR; LTK/IRK via SMP for LE) and optionally
 * disconnect it. The reply is sent immediately when no disconnection is
 * needed, otherwise from unpair_device_complete().
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: connection parameters can go right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3049
/* MGMT_OP_DISCONNECT handler: terminate the connection to the given
 * address. The reply is deferred until the disconnect completes (via the
 * pending command's generic_cmd_complete).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3115
/* Map an HCI link type plus address type onto the mgmt BDADDR_* address
 * type used in management messages.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	/* LE and ISO links carry LE addresses */
	if (link_type == LE_LINK || link_type == ISO_LINK) {
		if (addr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;

		/* Fallback to LE Random address type */
		return BDADDR_LE_RANDOM;
	}

	/* Fallback to BR/EDR type */
	return BDADDR_BREDR;
}
3135
/* MGMT_OP_GET_CONNECTIONS handler: reply with the addresses of all
 * mgmt-visible connections, filtering out SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count candidates to size the reply allocation.
	 * This is an upper bound since SCO links are filtered below.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		/* SCO/eSCO links are skipped: i is not advanced, so the
		 * entry written above is overwritten by the next match.
		 */
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3189
/* Issue HCI_OP_PIN_CODE_NEG_REPLY for the given address, tracking it as
 * a pending MGMT_OP_PIN_CODE_NEG_REPLY command. Caller holds hdev lock.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command carries only the peer bdaddr */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
3210
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code for
 * a BR/EDR connection to the controller.  When high security is being
 * established, only a full 16 byte PIN is acceptable; anything shorter
 * is turned into a negative reply and reported as Invalid Params.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN code pairing only applies to BR/EDR, so look up the ACL link */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		/* Reject the pairing at the HCI level first, then report
		 * the bad PIN length back to the mgmt caller (unless the
		 * negative reply itself already failed).
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3272
/* Handle MGMT_OP_SET_IO_CAPABILITY: store the IO capability that will
 * be used for subsequent pairing attempts.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject values beyond KeyboardDisplay */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = cp->io_capability;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
3295
find_pairing(struct hci_conn * conn)3296 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3297 {
3298 struct hci_dev *hdev = conn->hdev;
3299 struct mgmt_pending_cmd *cmd;
3300
3301 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3302 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3303 continue;
3304
3305 if (cmd->user_data != conn)
3306 continue;
3307
3308 return cmd;
3309 }
3310
3311 return NULL;
3312 }
3313
/* Complete a PAIR_DEVICE command: report the result to userspace,
 * detach the pairing callbacks and release the holds taken on the
 * connection when pairing was initiated.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Balance the hci_conn_get() stored in cmd->user_data by
	 * pair_device().  Must come last: conn may be freed after this.
	 */
	hci_conn_put(conn);

	return err;
}
3342
/* Called when SMP pairing finishes: complete any pending PAIR_DEVICE
 * command for this connection with success or failure accordingly.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
3354
/* BR/EDR connect/security/disconnect callback installed while a
 * PAIR_DEVICE command is in progress.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	BT_DBG("status %u", status);

	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3370
/* LE variant of pairing_complete_cb(): only failures complete the
 * command here; success is reported through mgmt_smp_complete() once
 * SMP actually finishes.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	/* A successful connection alone does not prove pairing finished */
	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3389
/* Handle MGMT_OP_PAIR_DEVICE: initiate pairing with a remote device.
 *
 * For BR/EDR an ACL connection is created directly; for LE the device
 * is connected via the connection scan.  The command stays pending
 * until pairing finishes (see pairing_complete()) and every reply
 * carries back the address that was requested.
 *
 * Fix: hci_conn_params_add() returns NULL on allocation failure, but
 * the result was dereferenced unconditionally (p->auto_connect),
 * leading to a NULL pointer dereference under memory pressure.  Bail
 * out with an error instead.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error onto a mgmt status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* Pairing callbacks already installed means another pairing is
	 * in progress on this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure, complete right away */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3520
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort a pending PAIR_DEVICE
 * command for the given address, remove any keys created so far and
 * tear down a connection that was only established for pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The supplied address must match the pending pairing's peer */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Complete the PAIR_DEVICE command as cancelled */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3577
/* Common handler for user pairing responses (PIN negative reply, user
 * confirm, passkey and their negative variants).  For LE connections
 * the response is handed to SMP directly; for BR/EDR a pending mgmt
 * command is queued and the response forwarded via @hci_op.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE responses go to SMP; the mgmt command completes immediately */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3648
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY: reject a PIN code request */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3660
/* Handle MGMT_OP_USER_CONFIRM_REPLY: confirm a numeric comparison.
 *
 * NOTE(review): this is the only reply variant that validates @len
 * explicitly — presumably its opcode is registered as variable-size
 * in the handler table; confirm against mgmt_handlers.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3676
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a numeric comparison */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3688
/* Handle MGMT_OP_USER_PASSKEY_REPLY: supply the passkey entered by
 * the user for passkey-entry pairing.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3700
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey request */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3712
adv_expire_sync(struct hci_dev * hdev,u32 flags)3713 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3714 {
3715 struct adv_info *adv_instance;
3716
3717 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3718 if (!adv_instance)
3719 return 0;
3720
3721 /* stop if current instance doesn't need to be changed */
3722 if (!(adv_instance->flags & flags))
3723 return 0;
3724
3725 cancel_adv_timeout(hdev);
3726
3727 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3728 if (!adv_instance)
3729 return 0;
3730
3731 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3732
3733 return 0;
3734 }
3735
/* cmd_sync work item: expire advertising instances that include the
 * local name so they get regenerated with the updated name.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3740
/* Completion handler for set_name_sync(): report the result of the
 * SET_LOCAL_NAME command to userspace and, when advertising, queue
 * regeneration of instances that carry the local name.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the command was already completed elsewhere */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3765
/* cmd_sync work item: push the new local name to the controller.
 * Updates the BR/EDR name and EIR data when BR/EDR is supported, and
 * the LE scan response data while advertising is active.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3781
/* Handle MGMT_OP_SET_LOCAL_NAME: update the device's long and short
 * name.  When powered off the names are only stored locally and the
 * change is broadcast; when powered on the update is pushed to the
 * controller asynchronously via set_name_sync().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is never sent to the controller, so it can be
	 * stored right away.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Store the new name; the controller update happens in
	 * set_name_sync() once the queued work runs.
	 */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3844
/* cmd_sync work item: expire advertising instances that include the
 * appearance value so they get regenerated with the new one.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3849
/* Handle MGMT_OP_SET_APPEARANCE: store the LE appearance value and
 * refresh advertising instances that carry it.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 new_appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	new_appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != new_appearance) {
		hdev->appearance = new_appearance;

		/* Regenerate advertising instances carrying the value */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3884
/* Handle MGMT_OP_GET_PHY_CONFIGURATION: report the supported, selected
 * and configurable PHYs of the controller.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* memset (not an initializer) so padding is zeroed as well */
	memset(&rp, 0, sizeof(rp));

	hci_dev_lock(hdev);
	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3905
/* Broadcast a PHY_CONFIGURATION_CHANGED event with the currently
 * selected PHYs to all mgmt sockets except @skip.
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3917
/* Completion handler for set_default_phy_sync(): derive the final
 * status from the cmd_sync error and the controller's reply skb,
 * report it to userspace and broadcast the change on success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if the command was already completed elsewhere */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* No cmd_sync error: derive the status from the reply skb */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3954
set_default_phy_sync(struct hci_dev * hdev,void * data)3955 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3956 {
3957 struct mgmt_pending_cmd *cmd = data;
3958 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3959 struct hci_cp_le_set_default_phy cp_phy;
3960 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3961
3962 memset(&cp_phy, 0, sizeof(cp_phy));
3963
3964 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3965 cp_phy.all_phys |= 0x01;
3966
3967 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3968 cp_phy.all_phys |= 0x02;
3969
3970 if (selected_phys & MGMT_PHY_LE_1M_TX)
3971 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3972
3973 if (selected_phys & MGMT_PHY_LE_2M_TX)
3974 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3975
3976 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3977 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3978
3979 if (selected_phys & MGMT_PHY_LE_1M_RX)
3980 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3981
3982 if (selected_phys & MGMT_PHY_LE_2M_RX)
3983 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3984
3985 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3986 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3987
3988 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3989 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
3990
3991 return 0;
3992 }
3993
/* Handle MGMT_OP_SET_PHY_CONFIGURATION: select the BR/EDR packet types
 * and LE PHYs to use.  BR/EDR PHY bits are translated directly into
 * the ACL packet type mask; LE PHY changes are forwarded to the
 * controller via set_default_phy_sync().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any selection outside of what the controller supports */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* All non-configurable PHYs must remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate the BR/EDR PHY selection into the ACL packet type
	 * mask.  Note the inverse logic for EDR bits: setting HCI_2DHx/
	 * HCI_3DHx in pkt_type *disables* those packets.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part of the selection is unchanged, only the BR/EDR
	 * packet types needed updating and we can complete right away.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4122
/* Handle MGMT_OP_SET_BLOCKED_KEYS: replace the list of blocked
 * (revoked) keys with the one supplied by userspace.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int err = MGMT_STATUS_SUCCESS;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Start from an empty list and re-add each supplied key */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *blocked = kzalloc(sizeof(*blocked),
						      GFP_KERNEL);

		if (!blocked) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		blocked->type = keys->keys[i].type;
		memcpy(blocked->val, keys->keys[i].val, sizeof(blocked->val));
		list_add_rcu(&blocked->list, &hdev->blocked_keys);
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}
4171
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband speech
 * setting (HCI_WIDEBAND_SPEECH_ENABLED flag).  Changing the value is
 * rejected while the controller is powered.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* The setting can only be flipped while powered off */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Atomically update the flag and remember whether it changed */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4220
/* Handle MGMT_OP_READ_CONTROLLER_CAP: build an EIR-style list of
 * security capability entries (security flags, maximum encryption key
 * sizes and, when available, the LE TX power range) and return it.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* 20 bytes: fixed reply header plus all entries appended below */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4287
/* 128-bit UUIDs identifying each experimental (mgmt "exp") feature.
 * The byte arrays are stored in little-endian order, i.e. reversed
 * relative to the textual UUID given above each table.
 */

#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4331
/* Handle MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental features
 * relevant for the given index (or the non-controller index when hdev is
 * NULL) together with their current enabled/supported flag bits.
 *
 * Side effect: enables HCI_MGMT_EXP_FEATURE_EVENTS on the socket so the
 * client gets change notifications from now on.
 *
 * Returns the result of mgmt_cmd_complete(), or -ENOMEM.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features: debug, LE simultaneous roles,
	 * LL privacy, quality report, offload codecs, ISO socket and mesh.
	 * Keep this count in sync with the blocks below.
	 */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature is only reported on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* BIT(1) marks the feature as supported by the controller,
		 * BIT(0) whether LL privacy is currently enabled.
		 */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* 20 == sizeof(rp->features[0]): 16-byte UUID + 4-byte flags */
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}
4436
/* Broadcast an Experimental Feature Changed event for the LL privacy
 * (RPA resolution) feature and mirror the new state into the device's
 * supported connection flags. BIT(1) is always set to indicate the
 * supported-settings change, BIT(0) reflects the enabled state.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	/* NOTE(review): conn_flags is updated non-atomically here — confirm
	 * whether concurrent readers require this to be atomic.
	 */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);

}
4457
/* Broadcast an Experimental Feature Changed event for the given feature
 * UUID to all sockets that enabled HCI_MGMT_EXP_FEATURE_EVENTS, except
 * the one in @skip. BIT(0) of the flags mirrors the enabled state.
 */
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = enabled ? BIT(0) : 0;

	memset(&ev, 0, sizeof(ev));
	ev.flags = cpu_to_le32(flags);
	memcpy(ev.uuid, uuid, 16);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
4471
/* Helper to populate one entry of the exp_features[] dispatch table */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4477
4478 /* The zero key uuid is special. Multiple exp features are set through it. */
/* The zero key uuid is special. Multiple exp features are set through it:
 * it acts as a "disable all" request, clearing the debug feature (on the
 * non-controller index) and LL privacy (on a powered-down controller).
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* LL privacy can only be toggled while powered down */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4514
4515 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Experimental feature handler: toggle the kernel Bluetooth debug output.
 * Only valid on the non-controller index with a single boolean parameter
 * octet; notifies other listeners on change.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	/* "changed" means the new value differs from the current state */
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
4560 #endif
4561
/* Experimental feature handler: toggle the experimental mesh support flag
 * on a controller. Disabling also clears the active HCI_MESH flag.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		/* Turning the experimental feature off also disables any
		 * currently active mesh operation.
		 */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}
4612
/* Experimental feature handler: toggle LL privacy (host RPA resolution).
 * Only allowed while the controller is powered down; enabling also clears
 * the advertising flag since advertising must be re-set-up under LL
 * privacy.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4677
/* Experimental feature handler: toggle the controller quality report,
 * using either the driver hook (set_quality_report) or the AOSP vendor
 * extension. Runs under the request-sync lock because it may issue HCI
 * commands to the controller.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* The driver hook takes precedence over the AOSP path */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only update the flag once the controller accepted it */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4751
/* Experimental feature handler: toggle offloaded codec usage. Only a flag
 * change — requires the driver to provide get_data_path_id, which is used
 * as the capability indicator.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	/* Codec offload requires driver support for data path discovery */
	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4809
/* Experimental feature handler: toggle use of simultaneous LE central and
 * peripheral roles. Only a flag change — requires the controller's LE
 * state combination support (hci_dev_le_state_simultaneous).
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4867
4868 #ifdef CONFIG_BT_LE
/* Experimental feature handler: enable or disable ISO socket support by
 * registering/unregistering the ISO protocol (iso_init/iso_exit). Only
 * valid on the non-controller index with a single boolean parameter
 * octet; notifies other listeners when the state actually changed.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Normalize with !!, consistent with the other set_*_func handlers */
	val = !!cp->param[0];
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	/* Only report a change when the protocol toggle succeeded */
	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
4917 #endif
4918
/* Dispatch table mapping each experimental feature UUID to its handler.
 * set_exp_feature() walks this table; the NULL sentinel terminates it.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4940
/* Handle MGMT_OP_SET_EXP_FEATURE: dispatch to the handler whose UUID in
 * exp_features[] matches the one in the request, or report the feature
 * as unsupported.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	const struct mgmt_exp_feature *feature;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (feature = exp_features; feature->uuid; feature++) {
		if (!memcmp(cp->uuid, feature->uuid, 16))
			return feature->set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4958
/* Compute the connection flags supported for a specific LE connection
 * parameter entry, starting from the device-wide set.
 */
static u32 get_params_flags(struct hci_dev *hdev,
			    struct hci_conn_params *params)
{
	u32 flags = hdev->conn_flags;

	/* Devices using RPAs can only be programmed in the acceptlist if
	 * LL Privacy has been enabled, otherwise they cannot mark
	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
	 */
	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;

	return flags;
}
4974
/* Handle MGMT_OP_GET_DEVICE_FLAGS: look up the device either in the
 * BR/EDR accept list or the LE connection parameters and report its
 * supported and current connection flags.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	/* Default to INVALID_PARAMS; overwritten on a successful lookup */
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		/* LE entries may restrict the device-wide supported flags */
		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}
5026
/* Broadcast a Device Flags Changed event for the given address, skipping
 * the socket that triggered the change.
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	/* Fill in the event payload before broadcasting it */
	ev.addr.type = bdaddr_type;
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.current_flags = cpu_to_le32(current_flags);
	ev.supported_flags = cpu_to_le32(supported_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
5040
/* Handle MGMT_OP_SET_DEVICE_FLAGS: validate the requested connection
 * flags against what the device supports, store them on the matching
 * BR/EDR accept-list entry or LE parameter entry, and broadcast a
 * Device Flags Changed event on success.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): supported_flags is read before hci_dev_lock() is
	 * taken below — confirm whether hdev->conn_flags can change
	 * concurrently and whether the lock should be taken earlier.
	 */
	supported_flags = hdev->conn_flags;

	/* Reject any flag bit outside the supported set */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* Re-validate against the (possibly narrower) per-entry set */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5117
/* Broadcast an Advertisement Monitor Added event for the given handle,
 * skipping the socket that registered the monitor.
 */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
5127
/* Broadcast an Advertisement Monitor Removed event for the given handle.
 * If a Remove Adv Monitor command for a specific (non-zero) handle is
 * pending, its issuing socket is skipped — presumably because that
 * socket receives a command reply instead; confirm against the pending
 * command handling.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5147
/* Handle MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported/enabled
 * monitor features, limits, and the handles of all registered monitors.
 *
 * Returns the result of mgmt_cmd_complete(), or -ENOMEM.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* NOTE(review): assumes the IDR never holds more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES entries — confirm registration
	 * enforces this limit, otherwise handles[] could overflow.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5196
/* Completion callback for the queued Add Adv Patterns Monitor work: on
 * success, announce the new monitor, bump the count, mark it registered
 * and refresh passive scanning; always reply to the pending command and
 * drop it.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5224
mgmt_add_adv_patterns_monitor_sync(struct hci_dev * hdev,void * data)5225 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5226 {
5227 struct mgmt_pending_cmd *cmd = data;
5228 struct adv_monitor *monitor = cmd->user_data;
5229
5230 return hci_add_adv_monitor(hdev, monitor);
5231 }
5232
/* Common tail for the Add Adv Patterns Monitor commands: queue the
 * registration of monitor @m via hci_cmd_sync.
 *
 * Ownership: takes ownership of @m. On any failure path (including a
 * non-zero incoming @status) the monitor is freed and a command status
 * is returned; on success, @m lives on as cmd->user_data until the
 * completion callback runs.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	/* Refuse while any conflicting monitor/LE operation is pending */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5280
parse_adv_monitor_rssi(struct adv_monitor * m,struct mgmt_adv_rssi_thresholds * rssi)5281 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5282 struct mgmt_adv_rssi_thresholds *rssi)
5283 {
5284 if (rssi) {
5285 m->rssi.low_threshold = rssi->low_threshold;
5286 m->rssi.low_threshold_timeout =
5287 __le16_to_cpu(rssi->low_threshold_timeout);
5288 m->rssi.high_threshold = rssi->high_threshold;
5289 m->rssi.high_threshold_timeout =
5290 __le16_to_cpu(rssi->high_threshold_timeout);
5291 m->rssi.sampling_period = rssi->sampling_period;
5292 } else {
5293 /* Default values. These numbers are the least constricting
5294 * parameters for MSFT API to work, so it behaves as if there
5295 * are no rssi parameter to consider. May need to be changed
5296 * if other API are to be supported.
5297 */
5298 m->rssi.low_threshold = -127;
5299 m->rssi.low_threshold_timeout = 60;
5300 m->rssi.high_threshold = -127;
5301 m->rssi.high_threshold_timeout = 0;
5302 m->rssi.sampling_period = 0;
5303 }
5304 }
5305
/* Validate and copy each user-supplied pattern onto the monitor's pattern
 * list.  Returns a MGMT status code; on failure the patterns already linked
 * into m->patterns are left for the caller to free via
 * hci_free_adv_monitor().
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	int i;

	for (i = 0; i < pattern_count; i++) {
		struct mgmt_adv_pattern *src = &patterns[i];
		struct adv_pattern *p;

		/* Pattern must fit entirely within the extended AD data. */
		if (src->offset >= HCI_MAX_EXT_AD_LENGTH ||
		    src->length > HCI_MAX_EXT_AD_LENGTH ||
		    (src->offset + src->length) > HCI_MAX_EXT_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = src->ad_type;
		p->offset = src->offset;
		p->length = src->length;
		memcpy(p->value, src->value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
5336
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler: monitor without RSSI
 * thresholds, so default RSSI parameters are installed.
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;

	BT_DBG("request for %s", hdev->name);

	/* Request must carry at least one pattern and exactly match the
	 * advertised pattern_count.  The first test also guarantees
	 * cp->pattern_count is readable before the second dereferences it.
	 */
	if (len <= sizeof(*cp) ||
	    len != sizeof(*cp) +
		   cp->pattern_count * sizeof(struct mgmt_adv_pattern)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	/* Common tail frees m on failure and sends the mgmt response. */
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5373
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: like
 * add_adv_patterns_monitor() but with caller-supplied RSSI thresholds.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;

	BT_DBG("request for %s", hdev->name);

	/* Request must carry at least one pattern and exactly match the
	 * advertised pattern_count.  The first test also guarantees
	 * cp->pattern_count is readable before the second dereferences it.
	 */
	if (len <= sizeof(*cp) ||
	    len != sizeof(*cp) +
		   cp->pattern_count * sizeof(struct mgmt_adv_pattern)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	/* Common tail frees m on failure and sends the mgmt response. */
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5410
/* cmd_sync completion callback for MGMT_OP_REMOVE_ADV_MONITOR: report the
 * result back to user space and drop the pending command.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Echo back the handle the caller asked to remove (still LE order). */
	rp.monitor_handle = cp->monitor_handle;

	/* Monitor set changed; re-evaluate whether passive scan is needed. */
	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5433
mgmt_remove_adv_monitor_sync(struct hci_dev * hdev,void * data)5434 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5435 {
5436 struct mgmt_pending_cmd *cmd = data;
5437 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5438 u16 handle = __le16_to_cpu(cp->monitor_handle);
5439
5440 if (!handle)
5441 return hci_remove_all_adv_monitor(hdev);
5442
5443 return hci_remove_single_adv_monitor(hdev, handle);
5444 }
5445
/* MGMT_OP_REMOVE_ADV_MONITOR handler: queue removal of one monitor (or all
 * monitors, when the handle is 0) on the cmd_sync context.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Only one monitor (or LE state) operation may be in flight. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		/* Completion callback will never run; drop the pending cmd. */
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5491
/* cmd_sync completion for MGMT_OP_READ_LOCAL_OOB_DATA: translate the HCI
 * reply skb stashed in cmd->skb into a mgmt response.  cmd->skb may be
 * NULL, an ERR_PTR, or a valid skb whose first byte is the HCI status.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* err == 0 only says the sync request ran; the actual HCI result
	 * still has to be extracted from the skb.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy (P-192 only) reply: fill the 192-bit fields and
		 * shrink the response so the 256-bit fields are omitted.
		 */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Secure Connections reply carries both P-192 and P-256. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	/* Command was created with mgmt_pending_new(), so free, not remove. */
	mgmt_pending_free(cmd);
}
5558
read_local_oob_data_sync(struct hci_dev * hdev,void * data)5559 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5560 {
5561 struct mgmt_pending_cmd *cmd = data;
5562
5563 if (bredr_sc_enabled(hdev))
5564 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5565 else
5566 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5567
5568 if (IS_ERR(cmd->skb))
5569 return PTR_ERR(cmd->skb);
5570 else
5571 return 0;
5572 }
5573
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: validate device state and queue the
 * OOB-data read on the cmd_sync context.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data only exists for SSP-capable controllers. */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* mgmt_pending_new() (not _add) — the cmd is not tracked in the
	 * pending list, so the completion frees it with mgmt_pending_free().
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5615
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.  Two request layouts are accepted,
 * distinguished purely by length: the legacy form with P-192 hash/rand
 * only, and the extended form carrying both P-192 and P-256 values.
 * All-zero key material means "no OOB data" for that key type.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 values only, BR/EDR addresses only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: P-192 and P-256 values. */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known layout: malformed request. */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5723
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drop stored remote OOB data for
 * one BR/EDR address, or for all addresses when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Wildcard address: clear everything. */
		hci_remote_oob_data_clear(hdev);
	} else if (hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type) < 0) {
		/* Unknown address: nothing stored for it. */
		status = MGMT_STATUS_INVALID_PARAMS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
5760
/* Called by the HCI core when a discovery start finished: complete
 * whichever of the three start-discovery variants is pending.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* At most one of these can be pending at a time. */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
5783
/* Check that the requested discovery type is supported by this controller.
 * Returns true when valid; otherwise false with *mgmt_status set to the
 * MGMT error to report.
 */
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
				    uint8_t *mgmt_status)
{
	switch (type) {
	case DISCOV_TYPE_LE:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	case DISCOV_TYPE_INTERLEAVED:
		/* Interleaved discovery needs LE support AND (via the
		 * fallthrough) BR/EDR support.
		 */
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		fallthrough;
	case DISCOV_TYPE_BREDR:
		*mgmt_status = mgmt_bredr_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	default:
		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
5810
/* cmd_sync completion for the start-discovery variants: answer user space
 * and move the discovery state machine forward (or back to STOPPED).
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the command was already completed elsewhere (e.g. by
	 * mgmt_start_discovery_complete()) and cmd is no longer pending.
	 */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Response payload is the single type byte stored in cmd->param. */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5829
/* cmd_sync work callback: kick off discovery on the controller. */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5834
/* Shared implementation of MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: validate state, reset the discovery
 * filter and queue the discovery start on the cmd_sync context.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry is on. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5905
/* MGMT_OP_START_DISCOVERY handler: general (non-limited) discovery. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5912
/* MGMT_OP_START_LIMITED_DISCOVERY handler: limited discovery variant. */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5920
/* MGMT_OP_START_SERVICE_DISCOVERY handler: discovery with result filtering
 * by RSSI and an optional list of 128-bit service UUIDs appended to the
 * fixed-size command parameters.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Largest uuid_count for which sizeof(*cp) + count * 16 fits in u16,
	 * guarding the expected_len computation below against overflow.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry is on. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery while it is paused by the core. */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The request must contain exactly uuid_count 16-byte UUIDs. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Freed later by hci_discovery_filter_clear(). */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6032
/* Called by the HCI core when discovery stopping finished: complete any
 * pending MGMT_OP_STOP_DISCOVERY command.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
6049
/* cmd_sync completion for MGMT_OP_STOP_DISCOVERY: answer user space and,
 * on success, mark discovery as stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the command was already completed elsewhere. */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Response payload is the single type byte stored in cmd->param. */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6066
/* cmd_sync work callback: stop discovery on the controller. */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6071
/* MGMT_OP_STOP_DISCOVERY handler: validate that discovery of the requested
 * type is actually active, then queue the stop on the cmd_sync context.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type must match the one discovery was started with. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6116
/* MGMT_OP_CONFIRM_NAME handler: user space tells us whether the name of a
 * device found during discovery is already known, so the inquiry cache can
 * skip or schedule remote-name resolution for it.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only meaningful while discovery is running. */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: no resolution needed for this entry. */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Queue the entry for remote-name resolution. */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
6158
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the reject list and
 * broadcast a Device Blocked event on success.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0) {
		/* Already on the list, or allocation failed. */
		status = MGMT_STATUS_FAILED;
	} else {
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6194
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from the reject list
 * and broadcast a Device Unblocked event on success.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0) {
		/* Address was never blocked. */
		status = MGMT_STATUS_INVALID_PARAMS;
	} else {
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6230
/* cmd_sync work callback: refresh the EIR data so it picks up the new
 * Device ID record.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6235
/* MGMT_OP_SET_DEVICE_ID handler: store the DI record (source, vendor,
 * product, version) and schedule an EIR update to advertise it.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	/* Only 0x0000-0x0002 are defined source values. */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Best effort: the EIR update failing is not reported to the caller. */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
6267
/* Log the outcome of re-enabling instance advertising. */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6275
/* cmd_sync completion for MGMT_OP_SET_ADVERTISING: sync the HCI_ADVERTISING
 * flag with reality, answer all pending commands, emit New Settings, and
 * resume instance advertising if the global setting was just disabled.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending SET_ADVERTISING command with status. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's actual LE advertising state. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last responding socket. */
	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured one. */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6323
/* cmd_sync work callback for MGMT_OP_SET_ADVERTISING: apply the requested
 * mode on the controller.  cp->val: 0x00 = off, 0x01 = on,
 * 0x02 = on and connectable.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	/* Stop any instance-advertising rotation timer first. */
	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6357
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Advertising requires LE support and the LE setting enabled. */
	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* 0x00 = off, 0x01 = advertising, 0x02 = connectable advertising */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Refuse to toggle the setting while advertising is paused. */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		/* Only flip the flags here; no HCI traffic is generated. */
		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast New Settings only if a flag actually changed. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising / Set LE operation may be in flight. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Queue the actual controller update; set_advertising_complete()
	 * sends the response once set_adv_sync() has run.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6442
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* The static address may only be changed while powered off. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY clears the static address. Any other value must be
	 * a valid static random address: not BDADDR_NONE and with the two
	 * most significant bits set.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY) &&
	    (!bacmp(&cp->bdaddr, BDADDR_NONE) ||
	     (cp->bdaddr.b[5] & 0xc0) != 0xc0))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6486
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);
	window = __le16_to_cpu(cp->window);

	/* Both values must lie in the 0x0004-0x4000 range and the scan
	 * window must not exceed the scan interval.
	 */
	if (interval < 0x0004 || interval > 0x4000 ||
	    window < 0x0004 || window > 0x4000 ||
	    window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6535
fast_connectable_complete(struct hci_dev * hdev,void * data,int err)6536 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6537 {
6538 struct mgmt_pending_cmd *cmd = data;
6539
6540 bt_dev_dbg(hdev, "err %d", err);
6541
6542 if (err) {
6543 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6544 mgmt_status(err));
6545 } else {
6546 struct mgmt_mode *cp = cmd->param;
6547
6548 if (cp->val)
6549 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6550 else
6551 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6552
6553 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6554 new_settings(hdev, cmd->sk);
6555 }
6556
6557 mgmt_pending_free(cmd);
6558 }
6559
write_fast_connectable_sync(struct hci_dev * hdev,void * data)6560 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6561 {
6562 struct mgmt_pending_cmd *cmd = data;
6563 struct mgmt_mode *cp = cmd->param;
6564
6565 return hci_write_fast_connectable_sync(hdev, cp->val);
6566 }
6567
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Fast connectable is a BR/EDR-only feature available from
	 * Bluetooth 1.2 onwards.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only a plain boolean is accepted for this setting. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No state change: just acknowledge with the current settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off only the flag is toggled; the controller is
	 * updated when it gets powered.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	/* Queue the controller update; fast_connectable_complete() sends
	 * the final response.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6623
set_bredr_complete(struct hci_dev * hdev,void * data,int err)6624 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6625 {
6626 struct mgmt_pending_cmd *cmd = data;
6627
6628 bt_dev_dbg(hdev, "err %d", err);
6629
6630 if (err) {
6631 u8 mgmt_err = mgmt_status(err);
6632
6633 /* We need to restore the flag if related HCI commands
6634 * failed.
6635 */
6636 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6637
6638 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6639 } else {
6640 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6641 new_settings(hdev, cmd->sk);
6642 }
6643
6644 mgmt_pending_free(cmd);
6645 }
6646
set_bredr_sync(struct hci_dev * hdev,void * data)6647 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6648 {
6649 int status;
6650
6651 status = hci_write_fast_connectable_sync(hdev, false);
6652
6653 if (!status)
6654 status = hci_update_scan_sync(hdev);
6655
6656 /* Since only the advertising data flags will change, there
6657 * is no need to update the scan response data.
6658 */
6659 if (!status)
6660 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6661
6662 return status;
6663 }
6664
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Toggling BR/EDR only makes sense on a dual-mode controller. */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* BR/EDR cannot be toggled while the LE setting is off. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: reply with the current settings. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings. */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	/* Queue the controller update; set_bredr_complete() sends the
	 * final response and clears the flag again on failure.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6764
set_secure_conn_complete(struct hci_dev * hdev,void * data,int err)6765 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6766 {
6767 struct mgmt_pending_cmd *cmd = data;
6768 struct mgmt_mode *cp;
6769
6770 bt_dev_dbg(hdev, "err %d", err);
6771
6772 if (err) {
6773 u8 mgmt_err = mgmt_status(err);
6774
6775 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6776 goto done;
6777 }
6778
6779 cp = cmd->param;
6780
6781 switch (cp->val) {
6782 case 0x00:
6783 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6784 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6785 break;
6786 case 0x01:
6787 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6788 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6789 break;
6790 case 0x02:
6791 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6792 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6793 break;
6794 }
6795
6796 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6797 new_settings(hdev, cmd->sk);
6798
6799 done:
6800 mgmt_pending_free(cmd);
6801 }
6802
set_secure_conn_sync(struct hci_dev * hdev,void * data)6803 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6804 {
6805 struct mgmt_pending_cmd *cmd = data;
6806 struct mgmt_mode *cp = cmd->param;
6807 u8 val = !!cp->val;
6808
6809 /* Force write of val */
6810 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6811
6812 return hci_write_sc_support_sync(hdev, val);
6813 }
6814
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Secure Connections needs either controller SC support or LE
	 * enabled (for LE it is handled by the host stack).
	 */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On a BR/EDR capable controller SC requires SSP to be enabled. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	/* 0x00 = off, 0x01 = enabled, 0x02 = SC only mode */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Without power, controller SC support or BR/EDR enabled, only
	 * the flags are updated and no HCI command is issued.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do if both flags already match the request. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	/* Queue the controller update; set_secure_conn_complete() sends
	 * the final response and updates the flags.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6895
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* 0x00 = off, 0x01 = keep debug keys, 0x02 = also use debug keys */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Any non-zero value keeps debug keys in the key store. */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 additionally enables the use of debug keys. */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Propagate a use-flag change to a powered controller with SSP
	 * enabled via the SSP debug mode command.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6942
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* 0x00 = off, 0x01 = privacy, 0x02 = limited privacy */
	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The privacy setting can only be changed while powered off. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		/* Adopt the supplied IRK and mark the current RPA expired
		 * so a fresh one gets generated.
		 */
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Clear the IRK and all privacy-related state. */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6999
irk_is_valid(struct mgmt_irk_info * irk)7000 static bool irk_is_valid(struct mgmt_irk_info *irk)
7001 {
7002 switch (irk->addr.type) {
7003 case BDADDR_LE_PUBLIC:
7004 return true;
7005
7006 case BDADDR_LE_RANDOM:
7007 /* Two most significant bits shall be set */
7008 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7009 return false;
7010 return true;
7011 }
7012
7013 return false;
7014 }
7015
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the expected message length within u16. */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The message length must match the declared key count exactly. */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries before touching the existing IRK store. */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Loading is a full replacement of the current IRK list. */
	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type = le_addr_type(irk->addr.type);

		/* Blocked (known-bad) keys are skipped, not rejected. */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		/* NOTE(review): irk_is_valid() above only accepts LE address
		 * types, so this branch looks unreachable here - confirm.
		 */
		if (irk->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    addr_type, irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7091
ltk_is_valid(struct mgmt_ltk_info * key)7092 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7093 {
7094 if (key->initiator != 0x00 && key->initiator != 0x01)
7095 return false;
7096
7097 switch (key->addr.type) {
7098 case BDADDR_LE_PUBLIC:
7099 return true;
7100
7101 case BDADDR_LE_RANDOM:
7102 /* Two most significant bits shall be set */
7103 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7104 return false;
7105 return true;
7106 }
7107
7108 return false;
7109 }
7110
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the expected message length within u16. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The message length must match the declared key count exactly. */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate all entries before touching the existing LTK store. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Loading is a full replacement of the current LTK list. */
	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;
		u8 addr_type = le_addr_type(key->addr.type);

		/* Blocked (known-bad) keys are skipped, not rejected. */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type onto the SMP key type and MITM
		 * protection level. Note that MGMT_LTK_P256_DEBUG falls
		 * through to the default case, so debug keys (and any
		 * unknown type) are never loaded.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			fallthrough;
		default:
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		/* NOTE(review): ltk_is_valid() above only accepts LE address
		 * types, so this branch looks unreachable here - confirm.
		 */
		if (key->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    addr_type, type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7211
get_conn_info_complete(struct hci_dev * hdev,void * data,int err)7212 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7213 {
7214 struct mgmt_pending_cmd *cmd = data;
7215 struct hci_conn *conn = cmd->user_data;
7216 struct mgmt_cp_get_conn_info *cp = cmd->param;
7217 struct mgmt_rp_get_conn_info rp;
7218 u8 status;
7219
7220 bt_dev_dbg(hdev, "err %d", err);
7221
7222 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7223
7224 status = mgmt_status(err);
7225 if (status == MGMT_STATUS_SUCCESS) {
7226 rp.rssi = conn->rssi;
7227 rp.tx_power = conn->tx_power;
7228 rp.max_tx_power = conn->max_tx_power;
7229 } else {
7230 rp.rssi = HCI_RSSI_INVALID;
7231 rp.tx_power = HCI_TX_POWER_INVALID;
7232 rp.max_tx_power = HCI_TX_POWER_INVALID;
7233 }
7234
7235 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7236 &rp, sizeof(rp));
7237
7238 mgmt_pending_free(cmd);
7239 }
7240
/* Refresh RSSI/TX power for the connection referenced by cmd->param.
 * Runs from the hci_cmd_sync context; the reply is sent from
 * get_conn_info_complete().
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16 handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for get_conn_info_complete(). */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7278
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the queried address back in every reply. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BR/EDR addresses map to ACL links, everything else to LE links. */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		/* The response is deferred to get_conn_info_complete(). */
		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7369
get_clock_info_complete(struct hci_dev * hdev,void * data,int err)7370 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7371 {
7372 struct mgmt_pending_cmd *cmd = data;
7373 struct mgmt_cp_get_clock_info *cp = cmd->param;
7374 struct mgmt_rp_get_clock_info rp;
7375 struct hci_conn *conn = cmd->user_data;
7376 u8 status = mgmt_status(err);
7377
7378 bt_dev_dbg(hdev, "err %d", err);
7379
7380 memset(&rp, 0, sizeof(rp));
7381 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7382 rp.addr.type = cp->addr.type;
7383
7384 if (err)
7385 goto complete;
7386
7387 rp.local_clock = cpu_to_le32(hdev->clock);
7388
7389 if (conn) {
7390 rp.piconet_clock = cpu_to_le32(conn->clock);
7391 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7392 }
7393
7394 complete:
7395 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7396 sizeof(rp));
7397
7398 mgmt_pending_free(cmd);
7399 }
7400
/* Read the local and (if requested for a connection) piconet clock.
 * Runs from the hci_cmd_sync context; get_clock_info_complete() sends
 * the reply.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* A zeroed command (handle 0, which 0x00) reads the local clock.
	 * NOTE(review): its return value is ignored, so the local clock
	 * read appears to be best-effort - confirm.
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for get_clock_info_complete(). */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7422
/* Handler for MGMT_OP_GET_CLOCK_INFO: validate the request, resolve the
 * (optional) target connection and queue the clock read on the cmd_sync
 * workqueue. The reply is sent from get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Error replies still carry the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is a BR/EDR-only concept */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-zero address asks for that connection's piconet clock;
	 * BDADDR_ANY asks for the local clock only.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7486
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)7487 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7488 {
7489 struct hci_conn *conn;
7490
7491 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7492 if (!conn)
7493 return false;
7494
7495 if (conn->dst_type != type)
7496 return false;
7497
7498 if (conn->state != BT_CONNECTED)
7499 return false;
7500
7501 return true;
7502 }
7503
7504 /* This function requires the caller holds hdev->lock */
hci_conn_params_set(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type,u8 auto_connect)7505 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7506 u8 addr_type, u8 auto_connect)
7507 {
7508 struct hci_conn_params *params;
7509
7510 params = hci_conn_params_add(hdev, addr, addr_type);
7511 if (!params)
7512 return -EIO;
7513
7514 if (params->auto_connect == auto_connect)
7515 return 0;
7516
7517 hci_pend_le_list_del_init(params);
7518
7519 switch (auto_connect) {
7520 case HCI_AUTO_CONN_DISABLED:
7521 case HCI_AUTO_CONN_LINK_LOSS:
7522 /* If auto connect is being disabled when we're trying to
7523 * connect to device, keep connecting.
7524 */
7525 if (params->explicit_connect)
7526 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7527 break;
7528 case HCI_AUTO_CONN_REPORT:
7529 if (params->explicit_connect)
7530 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7531 else
7532 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7533 break;
7534 case HCI_AUTO_CONN_DIRECT:
7535 case HCI_AUTO_CONN_ALWAYS:
7536 if (!is_connected(hdev, addr, addr_type))
7537 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7538 break;
7539 }
7540
7541 params->auto_connect = auto_connect;
7542
7543 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7544 addr, addr_type, auto_connect);
7545
7546 return 0;
7547 }
7548
device_added(struct sock * sk,struct hci_dev * hdev,bdaddr_t * bdaddr,u8 type,u8 action)7549 static void device_added(struct sock *sk, struct hci_dev *hdev,
7550 bdaddr_t *bdaddr, u8 type, u8 action)
7551 {
7552 struct mgmt_ev_device_added ev;
7553
7554 bacpy(&ev.addr.bdaddr, bdaddr);
7555 ev.addr.type = type;
7556 ev.action = action;
7557
7558 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7559 }
7560
add_device_sync(struct hci_dev * hdev,void * data)7561 static int add_device_sync(struct hci_dev *hdev, void *data)
7562 {
7563 return hci_update_passive_scan_sync(hdev);
7564 }
7565
add_device(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7566 static int add_device(struct sock *sk, struct hci_dev *hdev,
7567 void *data, u16 len)
7568 {
7569 struct mgmt_cp_add_device *cp = data;
7570 u8 auto_conn, addr_type;
7571 struct hci_conn_params *params;
7572 int err;
7573 u32 current_flags = 0;
7574 u32 supported_flags;
7575
7576 bt_dev_dbg(hdev, "sock %p", sk);
7577
7578 if (!bdaddr_type_is_valid(cp->addr.type) ||
7579 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7580 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7581 MGMT_STATUS_INVALID_PARAMS,
7582 &cp->addr, sizeof(cp->addr));
7583
7584 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7585 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7586 MGMT_STATUS_INVALID_PARAMS,
7587 &cp->addr, sizeof(cp->addr));
7588
7589 hci_dev_lock(hdev);
7590
7591 if (cp->addr.type == BDADDR_BREDR) {
7592 /* Only incoming connections action is supported for now */
7593 if (cp->action != 0x01) {
7594 err = mgmt_cmd_complete(sk, hdev->id,
7595 MGMT_OP_ADD_DEVICE,
7596 MGMT_STATUS_INVALID_PARAMS,
7597 &cp->addr, sizeof(cp->addr));
7598 goto unlock;
7599 }
7600
7601 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7602 &cp->addr.bdaddr,
7603 cp->addr.type, 0);
7604 if (err)
7605 goto unlock;
7606
7607 hci_update_scan(hdev);
7608
7609 goto added;
7610 }
7611
7612 addr_type = le_addr_type(cp->addr.type);
7613
7614 if (cp->action == 0x02)
7615 auto_conn = HCI_AUTO_CONN_ALWAYS;
7616 else if (cp->action == 0x01)
7617 auto_conn = HCI_AUTO_CONN_DIRECT;
7618 else
7619 auto_conn = HCI_AUTO_CONN_REPORT;
7620
7621 /* Kernel internally uses conn_params with resolvable private
7622 * address, but Add Device allows only identity addresses.
7623 * Make sure it is enforced before calling
7624 * hci_conn_params_lookup.
7625 */
7626 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7627 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7628 MGMT_STATUS_INVALID_PARAMS,
7629 &cp->addr, sizeof(cp->addr));
7630 goto unlock;
7631 }
7632
7633 /* If the connection parameters don't exist for this device,
7634 * they will be created and configured with defaults.
7635 */
7636 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7637 auto_conn) < 0) {
7638 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7639 MGMT_STATUS_FAILED, &cp->addr,
7640 sizeof(cp->addr));
7641 goto unlock;
7642 } else {
7643 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7644 addr_type);
7645 if (params)
7646 current_flags = params->flags;
7647 }
7648
7649 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7650 if (err < 0)
7651 goto unlock;
7652
7653 added:
7654 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7655 supported_flags = hdev->conn_flags;
7656 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7657 supported_flags, current_flags);
7658
7659 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7660 MGMT_STATUS_SUCCESS, &cp->addr,
7661 sizeof(cp->addr));
7662
7663 unlock:
7664 hci_dev_unlock(hdev);
7665 return err;
7666 }
7667
device_removed(struct sock * sk,struct hci_dev * hdev,bdaddr_t * bdaddr,u8 type)7668 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7669 bdaddr_t *bdaddr, u8 type)
7670 {
7671 struct mgmt_ev_device_removed ev;
7672
7673 bacpy(&ev.addr.bdaddr, bdaddr);
7674 ev.addr.type = type;
7675
7676 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7677 }
7678
remove_device_sync(struct hci_dev * hdev,void * data)7679 static int remove_device_sync(struct hci_dev *hdev, void *data)
7680 {
7681 return hci_update_passive_scan_sync(hdev);
7682 }
7683
remove_device(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7684 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7685 void *data, u16 len)
7686 {
7687 struct mgmt_cp_remove_device *cp = data;
7688 int err;
7689
7690 bt_dev_dbg(hdev, "sock %p", sk);
7691
7692 hci_dev_lock(hdev);
7693
7694 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7695 struct hci_conn_params *params;
7696 u8 addr_type;
7697
7698 if (!bdaddr_type_is_valid(cp->addr.type)) {
7699 err = mgmt_cmd_complete(sk, hdev->id,
7700 MGMT_OP_REMOVE_DEVICE,
7701 MGMT_STATUS_INVALID_PARAMS,
7702 &cp->addr, sizeof(cp->addr));
7703 goto unlock;
7704 }
7705
7706 if (cp->addr.type == BDADDR_BREDR) {
7707 err = hci_bdaddr_list_del(&hdev->accept_list,
7708 &cp->addr.bdaddr,
7709 cp->addr.type);
7710 if (err) {
7711 err = mgmt_cmd_complete(sk, hdev->id,
7712 MGMT_OP_REMOVE_DEVICE,
7713 MGMT_STATUS_INVALID_PARAMS,
7714 &cp->addr,
7715 sizeof(cp->addr));
7716 goto unlock;
7717 }
7718
7719 hci_update_scan(hdev);
7720
7721 device_removed(sk, hdev, &cp->addr.bdaddr,
7722 cp->addr.type);
7723 goto complete;
7724 }
7725
7726 addr_type = le_addr_type(cp->addr.type);
7727
7728 /* Kernel internally uses conn_params with resolvable private
7729 * address, but Remove Device allows only identity addresses.
7730 * Make sure it is enforced before calling
7731 * hci_conn_params_lookup.
7732 */
7733 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7734 err = mgmt_cmd_complete(sk, hdev->id,
7735 MGMT_OP_REMOVE_DEVICE,
7736 MGMT_STATUS_INVALID_PARAMS,
7737 &cp->addr, sizeof(cp->addr));
7738 goto unlock;
7739 }
7740
7741 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7742 addr_type);
7743 if (!params) {
7744 err = mgmt_cmd_complete(sk, hdev->id,
7745 MGMT_OP_REMOVE_DEVICE,
7746 MGMT_STATUS_INVALID_PARAMS,
7747 &cp->addr, sizeof(cp->addr));
7748 goto unlock;
7749 }
7750
7751 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7752 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7753 err = mgmt_cmd_complete(sk, hdev->id,
7754 MGMT_OP_REMOVE_DEVICE,
7755 MGMT_STATUS_INVALID_PARAMS,
7756 &cp->addr, sizeof(cp->addr));
7757 goto unlock;
7758 }
7759
7760 hci_conn_params_free(params);
7761
7762 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7763 } else {
7764 struct hci_conn_params *p, *tmp;
7765 struct bdaddr_list *b, *btmp;
7766
7767 if (cp->addr.type) {
7768 err = mgmt_cmd_complete(sk, hdev->id,
7769 MGMT_OP_REMOVE_DEVICE,
7770 MGMT_STATUS_INVALID_PARAMS,
7771 &cp->addr, sizeof(cp->addr));
7772 goto unlock;
7773 }
7774
7775 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7776 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7777 list_del(&b->list);
7778 kfree(b);
7779 }
7780
7781 hci_update_scan(hdev);
7782
7783 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7784 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7785 continue;
7786 device_removed(sk, hdev, &p->addr, p->addr_type);
7787 if (p->explicit_connect) {
7788 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7789 continue;
7790 }
7791 hci_conn_params_free(p);
7792 }
7793
7794 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7795 }
7796
7797 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7798
7799 complete:
7800 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7801 MGMT_STATUS_SUCCESS, &cp->addr,
7802 sizeof(cp->addr));
7803 unlock:
7804 hci_dev_unlock(hdev);
7805 return err;
7806 }
7807
load_conn_param(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7808 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7809 u16 len)
7810 {
7811 struct mgmt_cp_load_conn_param *cp = data;
7812 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7813 sizeof(struct mgmt_conn_param));
7814 u16 param_count, expected_len;
7815 int i;
7816
7817 if (!lmp_le_capable(hdev))
7818 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7819 MGMT_STATUS_NOT_SUPPORTED);
7820
7821 param_count = __le16_to_cpu(cp->param_count);
7822 if (param_count > max_param_count) {
7823 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7824 param_count);
7825 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7826 MGMT_STATUS_INVALID_PARAMS);
7827 }
7828
7829 expected_len = struct_size(cp, params, param_count);
7830 if (expected_len != len) {
7831 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7832 expected_len, len);
7833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7834 MGMT_STATUS_INVALID_PARAMS);
7835 }
7836
7837 bt_dev_dbg(hdev, "param_count %u", param_count);
7838
7839 hci_dev_lock(hdev);
7840
7841 hci_conn_params_clear_disabled(hdev);
7842
7843 for (i = 0; i < param_count; i++) {
7844 struct mgmt_conn_param *param = &cp->params[i];
7845 struct hci_conn_params *hci_param;
7846 u16 min, max, latency, timeout;
7847 u8 addr_type;
7848
7849 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
7850 param->addr.type);
7851
7852 if (param->addr.type == BDADDR_LE_PUBLIC) {
7853 addr_type = ADDR_LE_DEV_PUBLIC;
7854 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7855 addr_type = ADDR_LE_DEV_RANDOM;
7856 } else {
7857 bt_dev_err(hdev, "ignoring invalid connection parameters");
7858 continue;
7859 }
7860
7861 min = le16_to_cpu(param->min_interval);
7862 max = le16_to_cpu(param->max_interval);
7863 latency = le16_to_cpu(param->latency);
7864 timeout = le16_to_cpu(param->timeout);
7865
7866 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7867 min, max, latency, timeout);
7868
7869 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7870 bt_dev_err(hdev, "ignoring invalid connection parameters");
7871 continue;
7872 }
7873
7874 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
7875 addr_type);
7876 if (!hci_param) {
7877 bt_dev_err(hdev, "failed to add connection parameters");
7878 continue;
7879 }
7880
7881 hci_param->conn_min_interval = min;
7882 hci_param->conn_max_interval = max;
7883 hci_param->conn_latency = latency;
7884 hci_param->supervision_timeout = timeout;
7885 }
7886
7887 hci_dev_unlock(hdev);
7888
7889 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7890 NULL, 0);
7891 }
7892
set_external_config(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7893 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7894 void *data, u16 len)
7895 {
7896 struct mgmt_cp_set_external_config *cp = data;
7897 bool changed;
7898 int err;
7899
7900 bt_dev_dbg(hdev, "sock %p", sk);
7901
7902 if (hdev_is_powered(hdev))
7903 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7904 MGMT_STATUS_REJECTED);
7905
7906 if (cp->config != 0x00 && cp->config != 0x01)
7907 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7908 MGMT_STATUS_INVALID_PARAMS);
7909
7910 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7911 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7912 MGMT_STATUS_NOT_SUPPORTED);
7913
7914 hci_dev_lock(hdev);
7915
7916 if (cp->config)
7917 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7918 else
7919 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7920
7921 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7922 if (err < 0)
7923 goto unlock;
7924
7925 if (!changed)
7926 goto unlock;
7927
7928 err = new_options(hdev, sk);
7929
7930 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7931 mgmt_index_removed(hdev);
7932
7933 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7934 hci_dev_set_flag(hdev, HCI_CONFIG);
7935 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7936
7937 queue_work(hdev->req_workqueue, &hdev->power_on);
7938 } else {
7939 set_bit(HCI_RAW, &hdev->flags);
7940 mgmt_index_added(hdev);
7941 }
7942 }
7943
7944 unlock:
7945 hci_dev_unlock(hdev);
7946 return err;
7947 }
7948
set_public_address(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7949 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7950 void *data, u16 len)
7951 {
7952 struct mgmt_cp_set_public_address *cp = data;
7953 bool changed;
7954 int err;
7955
7956 bt_dev_dbg(hdev, "sock %p", sk);
7957
7958 if (hdev_is_powered(hdev))
7959 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7960 MGMT_STATUS_REJECTED);
7961
7962 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7963 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7964 MGMT_STATUS_INVALID_PARAMS);
7965
7966 if (!hdev->set_bdaddr)
7967 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7968 MGMT_STATUS_NOT_SUPPORTED);
7969
7970 hci_dev_lock(hdev);
7971
7972 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7973 bacpy(&hdev->public_addr, &cp->bdaddr);
7974
7975 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7976 if (err < 0)
7977 goto unlock;
7978
7979 if (!changed)
7980 goto unlock;
7981
7982 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7983 err = new_options(hdev, sk);
7984
7985 if (is_configured(hdev)) {
7986 mgmt_index_removed(hdev);
7987
7988 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7989
7990 hci_dev_set_flag(hdev, HCI_CONFIG);
7991 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7992
7993 queue_work(hdev->req_workqueue, &hdev->power_on);
7994 }
7995
7996 unlock:
7997 hci_dev_unlock(hdev);
7998 return err;
7999 }
8000
read_local_oob_ext_data_complete(struct hci_dev * hdev,void * data,int err)8001 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8002 int err)
8003 {
8004 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8005 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8006 u8 *h192, *r192, *h256, *r256;
8007 struct mgmt_pending_cmd *cmd = data;
8008 struct sk_buff *skb = cmd->skb;
8009 u8 status = mgmt_status(err);
8010 u16 eir_len;
8011
8012 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8013 return;
8014
8015 if (!status) {
8016 if (!skb)
8017 status = MGMT_STATUS_FAILED;
8018 else if (IS_ERR(skb))
8019 status = mgmt_status(PTR_ERR(skb));
8020 else
8021 status = mgmt_status(skb->data[0]);
8022 }
8023
8024 bt_dev_dbg(hdev, "status %u", status);
8025
8026 mgmt_cp = cmd->param;
8027
8028 if (status) {
8029 status = mgmt_status(status);
8030 eir_len = 0;
8031
8032 h192 = NULL;
8033 r192 = NULL;
8034 h256 = NULL;
8035 r256 = NULL;
8036 } else if (!bredr_sc_enabled(hdev)) {
8037 struct hci_rp_read_local_oob_data *rp;
8038
8039 if (skb->len != sizeof(*rp)) {
8040 status = MGMT_STATUS_FAILED;
8041 eir_len = 0;
8042 } else {
8043 status = MGMT_STATUS_SUCCESS;
8044 rp = (void *)skb->data;
8045
8046 eir_len = 5 + 18 + 18;
8047 h192 = rp->hash;
8048 r192 = rp->rand;
8049 h256 = NULL;
8050 r256 = NULL;
8051 }
8052 } else {
8053 struct hci_rp_read_local_oob_ext_data *rp;
8054
8055 if (skb->len != sizeof(*rp)) {
8056 status = MGMT_STATUS_FAILED;
8057 eir_len = 0;
8058 } else {
8059 status = MGMT_STATUS_SUCCESS;
8060 rp = (void *)skb->data;
8061
8062 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8063 eir_len = 5 + 18 + 18;
8064 h192 = NULL;
8065 r192 = NULL;
8066 } else {
8067 eir_len = 5 + 18 + 18 + 18 + 18;
8068 h192 = rp->hash192;
8069 r192 = rp->rand192;
8070 }
8071
8072 h256 = rp->hash256;
8073 r256 = rp->rand256;
8074 }
8075 }
8076
8077 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8078 if (!mgmt_rp)
8079 goto done;
8080
8081 if (eir_len == 0)
8082 goto send_rsp;
8083
8084 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8085 hdev->dev_class, 3);
8086
8087 if (h192 && r192) {
8088 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8089 EIR_SSP_HASH_C192, h192, 16);
8090 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8091 EIR_SSP_RAND_R192, r192, 16);
8092 }
8093
8094 if (h256 && r256) {
8095 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8096 EIR_SSP_HASH_C256, h256, 16);
8097 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8098 EIR_SSP_RAND_R256, r256, 16);
8099 }
8100
8101 send_rsp:
8102 mgmt_rp->type = mgmt_cp->type;
8103 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8104
8105 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8106 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8107 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8108 if (err < 0 || status)
8109 goto done;
8110
8111 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8112
8113 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8114 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8115 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8116 done:
8117 if (skb && !IS_ERR(skb))
8118 kfree_skb(skb);
8119
8120 kfree(mgmt_rp);
8121 mgmt_pending_remove(cmd);
8122 }
8123
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)8124 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8125 struct mgmt_cp_read_local_oob_ext_data *cp)
8126 {
8127 struct mgmt_pending_cmd *cmd;
8128 int err;
8129
8130 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8131 cp, sizeof(*cp));
8132 if (!cmd)
8133 return -ENOMEM;
8134
8135 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8136 read_local_oob_ext_data_complete);
8137
8138 if (err < 0) {
8139 mgmt_pending_remove(cmd);
8140 return err;
8141 }
8142
8143 return 0;
8144 }
8145
read_local_oob_ext_data(struct sock * sk,struct hci_dev * hdev,void * data,u16 data_len)8146 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8147 void *data, u16 data_len)
8148 {
8149 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8150 struct mgmt_rp_read_local_oob_ext_data *rp;
8151 size_t rp_len;
8152 u16 eir_len;
8153 u8 status, flags, role, addr[7], hash[16], rand[16];
8154 int err;
8155
8156 bt_dev_dbg(hdev, "sock %p", sk);
8157
8158 if (hdev_is_powered(hdev)) {
8159 switch (cp->type) {
8160 case BIT(BDADDR_BREDR):
8161 status = mgmt_bredr_support(hdev);
8162 if (status)
8163 eir_len = 0;
8164 else
8165 eir_len = 5;
8166 break;
8167 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8168 status = mgmt_le_support(hdev);
8169 if (status)
8170 eir_len = 0;
8171 else
8172 eir_len = 9 + 3 + 18 + 18 + 3;
8173 break;
8174 default:
8175 status = MGMT_STATUS_INVALID_PARAMS;
8176 eir_len = 0;
8177 break;
8178 }
8179 } else {
8180 status = MGMT_STATUS_NOT_POWERED;
8181 eir_len = 0;
8182 }
8183
8184 rp_len = sizeof(*rp) + eir_len;
8185 rp = kmalloc(rp_len, GFP_ATOMIC);
8186 if (!rp)
8187 return -ENOMEM;
8188
8189 if (!status && !lmp_ssp_capable(hdev)) {
8190 status = MGMT_STATUS_NOT_SUPPORTED;
8191 eir_len = 0;
8192 }
8193
8194 if (status)
8195 goto complete;
8196
8197 hci_dev_lock(hdev);
8198
8199 eir_len = 0;
8200 switch (cp->type) {
8201 case BIT(BDADDR_BREDR):
8202 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8203 err = read_local_ssp_oob_req(hdev, sk, cp);
8204 hci_dev_unlock(hdev);
8205 if (!err)
8206 goto done;
8207
8208 status = MGMT_STATUS_FAILED;
8209 goto complete;
8210 } else {
8211 eir_len = eir_append_data(rp->eir, eir_len,
8212 EIR_CLASS_OF_DEV,
8213 hdev->dev_class, 3);
8214 }
8215 break;
8216 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8217 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8218 smp_generate_oob(hdev, hash, rand) < 0) {
8219 hci_dev_unlock(hdev);
8220 status = MGMT_STATUS_FAILED;
8221 goto complete;
8222 }
8223
8224 /* This should return the active RPA, but since the RPA
8225 * is only programmed on demand, it is really hard to fill
8226 * this in at the moment. For now disallow retrieving
8227 * local out-of-band data when privacy is in use.
8228 *
8229 * Returning the identity address will not help here since
8230 * pairing happens before the identity resolving key is
8231 * known and thus the connection establishment happens
8232 * based on the RPA and not the identity address.
8233 */
8234 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8235 hci_dev_unlock(hdev);
8236 status = MGMT_STATUS_REJECTED;
8237 goto complete;
8238 }
8239
8240 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8241 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8242 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8243 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8244 memcpy(addr, &hdev->static_addr, 6);
8245 addr[6] = 0x01;
8246 } else {
8247 memcpy(addr, &hdev->bdaddr, 6);
8248 addr[6] = 0x00;
8249 }
8250
8251 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8252 addr, sizeof(addr));
8253
8254 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8255 role = 0x02;
8256 else
8257 role = 0x01;
8258
8259 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8260 &role, sizeof(role));
8261
8262 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8263 eir_len = eir_append_data(rp->eir, eir_len,
8264 EIR_LE_SC_CONFIRM,
8265 hash, sizeof(hash));
8266
8267 eir_len = eir_append_data(rp->eir, eir_len,
8268 EIR_LE_SC_RANDOM,
8269 rand, sizeof(rand));
8270 }
8271
8272 flags = mgmt_get_adv_discov_flags(hdev);
8273
8274 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8275 flags |= LE_AD_NO_BREDR;
8276
8277 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8278 &flags, sizeof(flags));
8279 break;
8280 }
8281
8282 hci_dev_unlock(hdev);
8283
8284 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8285
8286 status = MGMT_STATUS_SUCCESS;
8287
8288 complete:
8289 rp->type = cp->type;
8290 rp->eir_len = cpu_to_le16(eir_len);
8291
8292 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8293 status, rp, sizeof(*rp) + eir_len);
8294 if (err < 0 || status)
8295 goto done;
8296
8297 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8298 rp, sizeof(*rp) + eir_len,
8299 HCI_MGMT_OOB_DATA_EVENTS, sk);
8300
8301 done:
8302 kfree(rp);
8303
8304 return err;
8305 }
8306
get_supported_adv_flags(struct hci_dev * hdev)8307 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8308 {
8309 u32 flags = 0;
8310
8311 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8312 flags |= MGMT_ADV_FLAG_DISCOV;
8313 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8314 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8315 flags |= MGMT_ADV_FLAG_APPEARANCE;
8316 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8317 flags |= MGMT_ADV_PARAM_DURATION;
8318 flags |= MGMT_ADV_PARAM_TIMEOUT;
8319 flags |= MGMT_ADV_PARAM_INTERVALS;
8320 flags |= MGMT_ADV_PARAM_TX_POWER;
8321 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8322
8323 /* In extended adv TX_POWER returned from Set Adv Param
8324 * will be always valid.
8325 */
8326 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8327 flags |= MGMT_ADV_FLAG_TX_POWER;
8328
8329 if (ext_adv_capable(hdev)) {
8330 flags |= MGMT_ADV_FLAG_SEC_1M;
8331 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8332 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8333
8334 if (le_2m_capable(hdev))
8335 flags |= MGMT_ADV_FLAG_SEC_2M;
8336
8337 if (le_coded_capable(hdev))
8338 flags |= MGMT_ADV_FLAG_SEC_CODED;
8339 }
8340
8341 return flags;
8342 }
8343
read_adv_features(struct sock * sk,struct hci_dev * hdev,void * data,u16 data_len)8344 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8345 void *data, u16 data_len)
8346 {
8347 struct mgmt_rp_read_adv_features *rp;
8348 size_t rp_len;
8349 int err;
8350 struct adv_info *adv_instance;
8351 u32 supported_flags;
8352 u8 *instance;
8353
8354 bt_dev_dbg(hdev, "sock %p", sk);
8355
8356 if (!lmp_le_capable(hdev))
8357 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8358 MGMT_STATUS_REJECTED);
8359
8360 hci_dev_lock(hdev);
8361
8362 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8363 rp = kmalloc(rp_len, GFP_ATOMIC);
8364 if (!rp) {
8365 hci_dev_unlock(hdev);
8366 return -ENOMEM;
8367 }
8368
8369 supported_flags = get_supported_adv_flags(hdev);
8370
8371 rp->supported_flags = cpu_to_le32(supported_flags);
8372 rp->max_adv_data_len = max_adv_len(hdev);
8373 rp->max_scan_rsp_len = max_adv_len(hdev);
8374 rp->max_instances = hdev->le_num_of_adv_sets;
8375 rp->num_instances = hdev->adv_instance_cnt;
8376
8377 instance = rp->instance;
8378 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8379 /* Only instances 1-le_num_of_adv_sets are externally visible */
8380 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8381 *instance = adv_instance->instance;
8382 instance++;
8383 } else {
8384 rp->num_instances--;
8385 rp_len--;
8386 }
8387 }
8388
8389 hci_dev_unlock(hdev);
8390
8391 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8392 MGMT_STATUS_SUCCESS, rp, rp_len);
8393
8394 kfree(rp);
8395
8396 return err;
8397 }
8398
calculate_name_len(struct hci_dev * hdev)8399 static u8 calculate_name_len(struct hci_dev *hdev)
8400 {
8401 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8402
8403 return eir_append_local_name(hdev, buf, 0);
8404 }
8405
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)8406 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8407 bool is_adv_data)
8408 {
8409 u8 max_len = max_adv_len(hdev);
8410
8411 if (is_adv_data) {
8412 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8413 MGMT_ADV_FLAG_LIMITED_DISCOV |
8414 MGMT_ADV_FLAG_MANAGED_FLAGS))
8415 max_len -= 3;
8416
8417 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8418 max_len -= 3;
8419 } else {
8420 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8421 max_len -= calculate_name_len(hdev);
8422
8423 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8424 max_len -= 4;
8425 }
8426
8427 return max_len;
8428 }
8429
flags_managed(u32 adv_flags)8430 static bool flags_managed(u32 adv_flags)
8431 {
8432 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8433 MGMT_ADV_FLAG_LIMITED_DISCOV |
8434 MGMT_ADV_FLAG_MANAGED_FLAGS);
8435 }
8436
tx_power_managed(u32 adv_flags)8437 static bool tx_power_managed(u32 adv_flags)
8438 {
8439 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8440 }
8441
name_managed(u32 adv_flags)8442 static bool name_managed(u32 adv_flags)
8443 {
8444 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8445 }
8446
appearance_managed(u32 adv_flags)8447 static bool appearance_managed(u32 adv_flags)
8448 {
8449 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8450 }
8451
tlv_data_is_valid(struct hci_dev * hdev,u32 adv_flags,u8 * data,u8 len,bool is_adv_data)8452 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8453 u8 len, bool is_adv_data)
8454 {
8455 int i, cur_len;
8456 u8 max_len;
8457
8458 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8459
8460 if (len > max_len)
8461 return false;
8462
8463 /* Make sure that the data is correctly formatted. */
8464 for (i = 0; i < len; i += (cur_len + 1)) {
8465 cur_len = data[i];
8466
8467 if (!cur_len)
8468 continue;
8469
8470 if (data[i + 1] == EIR_FLAGS &&
8471 (!is_adv_data || flags_managed(adv_flags)))
8472 return false;
8473
8474 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8475 return false;
8476
8477 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8478 return false;
8479
8480 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8481 return false;
8482
8483 if (data[i + 1] == EIR_APPEARANCE &&
8484 appearance_managed(adv_flags))
8485 return false;
8486
8487 /* If the current field length would exceed the total data
8488 * length, then it's invalid.
8489 */
8490 if (i + cur_len >= len)
8491 return false;
8492 }
8493
8494 return true;
8495 }
8496
requested_adv_flags_are_valid(struct hci_dev * hdev,u32 adv_flags)8497 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8498 {
8499 u32 supported_flags, phy_flags;
8500
8501 /* The current implementation only supports a subset of the specified
8502 * flags. Also need to check mutual exclusiveness of sec flags.
8503 */
8504 supported_flags = get_supported_adv_flags(hdev);
8505 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8506 if (adv_flags & ~supported_flags ||
8507 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8508 return false;
8509
8510 return true;
8511 }
8512
adv_busy(struct hci_dev * hdev)8513 static bool adv_busy(struct hci_dev *hdev)
8514 {
8515 return pending_find(MGMT_OP_SET_LE, hdev);
8516 }
8517
add_adv_complete(struct hci_dev * hdev,struct sock * sk,u8 instance,int err)8518 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8519 int err)
8520 {
8521 struct adv_info *adv, *n;
8522
8523 bt_dev_dbg(hdev, "err %d", err);
8524
8525 hci_dev_lock(hdev);
8526
8527 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8528 u8 instance;
8529
8530 if (!adv->pending)
8531 continue;
8532
8533 if (!err) {
8534 adv->pending = false;
8535 continue;
8536 }
8537
8538 instance = adv->instance;
8539
8540 if (hdev->cur_adv_instance == instance)
8541 cancel_adv_timeout(hdev);
8542
8543 hci_remove_adv_instance(hdev, instance);
8544 mgmt_advertising_removed(sk, hdev, instance);
8545 }
8546
8547 hci_dev_unlock(hdev);
8548 }
8549
add_advertising_complete(struct hci_dev * hdev,void * data,int err)8550 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8551 {
8552 struct mgmt_pending_cmd *cmd = data;
8553 struct mgmt_cp_add_advertising *cp = cmd->param;
8554 struct mgmt_rp_add_advertising rp;
8555
8556 memset(&rp, 0, sizeof(rp));
8557
8558 rp.instance = cp->instance;
8559
8560 if (err)
8561 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8562 mgmt_status(err));
8563 else
8564 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8565 mgmt_status(err), &rp, sizeof(rp));
8566
8567 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8568
8569 mgmt_pending_free(cmd);
8570 }
8571
add_advertising_sync(struct hci_dev * hdev,void * data)8572 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8573 {
8574 struct mgmt_pending_cmd *cmd = data;
8575 struct mgmt_cp_add_advertising *cp = cmd->param;
8576
8577 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8578 }
8579
/* MGMT_OP_ADD_ADVERTISING handler: register (or replace) advertising
 * instance cp->instance with the supplied flags, timeout, duration and
 * TLV payload, then schedule it when there is HCI work to do.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1-based, bounded by the controller's
	 * advertising-set count.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length tail must match the two declared lengths
	 * exactly; tlv_data_is_valid() below relies on this.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout only makes sense on a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Scan response data directly follows the advertising data in
	 * cp->data.
	 */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Record the instance the sync worker should actually schedule. */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8714
/* Completion callback for MGMT_OP_ADD_EXT_ADV_PARAMS: on success report
 * the selected TX power plus the remaining data space; on failure tear
 * the instance down again and report the error.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance may already have been removed; nothing to report. */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	/* NOTE(review): cmd is always non-NULL here (it is the callback's
	 * data argument); the check looks redundant but is kept as-is.
	 */
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8765
add_ext_adv_params_sync(struct hci_dev * hdev,void * data)8766 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8767 {
8768 struct mgmt_pending_cmd *cmd = data;
8769 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8770
8771 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8772 }
8773
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler: create an advertising instance
 * from parameters only (data is supplied later via ADD_EXT_ADV_DATA) and
 * program the controller when extended advertising is available.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instance numbers are 1-based, bounded by the controller's
	 * advertising-set count.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Roll back the instance created above. */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: nothing to program yet, so answer
		 * immediately with default TX power and the size limits.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8889
add_ext_adv_data_complete(struct hci_dev * hdev,void * data,int err)8890 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8891 {
8892 struct mgmt_pending_cmd *cmd = data;
8893 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8894 struct mgmt_rp_add_advertising rp;
8895
8896 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8897
8898 memset(&rp, 0, sizeof(rp));
8899
8900 rp.instance = cp->instance;
8901
8902 if (err)
8903 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8904 mgmt_status(err));
8905 else
8906 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8907 mgmt_status(err), &rp, sizeof(rp));
8908
8909 mgmt_pending_free(cmd);
8910 }
8911
add_ext_adv_data_sync(struct hci_dev * hdev,void * data)8912 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8913 {
8914 struct mgmt_pending_cmd *cmd = data;
8915 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8916 int err;
8917
8918 if (ext_adv_capable(hdev)) {
8919 err = hci_update_adv_data_sync(hdev, cp->instance);
8920 if (err)
8921 return err;
8922
8923 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8924 if (err)
8925 return err;
8926
8927 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8928 }
8929
8930 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8931 }
8932
/* MGMT_OP_ADD_EXT_ADV_DATA handler: attach advertising and scan-response
 * data to an instance previously created by ADD_EXT_ADV_PARAMS and start
 * advertising it.  On any validation failure the (still pending) instance
 * is removed again via the clear_new_instance label.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The params call must have created the instance first. */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9051
remove_advertising_complete(struct hci_dev * hdev,void * data,int err)9052 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9053 int err)
9054 {
9055 struct mgmt_pending_cmd *cmd = data;
9056 struct mgmt_cp_remove_advertising *cp = cmd->param;
9057 struct mgmt_rp_remove_advertising rp;
9058
9059 bt_dev_dbg(hdev, "err %d", err);
9060
9061 memset(&rp, 0, sizeof(rp));
9062 rp.instance = cp->instance;
9063
9064 if (err)
9065 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9066 mgmt_status(err));
9067 else
9068 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9069 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9070
9071 mgmt_pending_free(cmd);
9072 }
9073
remove_advertising_sync(struct hci_dev * hdev,void * data)9074 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9075 {
9076 struct mgmt_pending_cmd *cmd = data;
9077 struct mgmt_cp_remove_advertising *cp = cmd->param;
9078 int err;
9079
9080 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9081 if (err)
9082 return err;
9083
9084 if (list_empty(&hdev->adv_instances))
9085 err = hci_disable_advertising_sync(hdev);
9086
9087 return err;
9088 }
9089
/* MGMT_OP_REMOVE_ADVERTISING handler: validate the request under the
 * device lock and queue the actual removal as sync work.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Instance 0 means "remove all"; a non-zero instance must exist. */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* A pending SET_LE would race with the removal. */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered at all: nothing to remove. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9137
/* MGMT_OP_GET_ADV_SIZE_INFO handler: report how much advertising and
 * scan-response space remains for the given instance and flags.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	/* Instance numbers are 1-based, bounded by the controller. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* Only a subset of the specification-defined flags is implemented. */
	if (flags & ~get_supported_adv_flags(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
9173
/* Dispatch table for MGMT commands, indexed by opcode (entry 0 is the
 * unused opcode 0x0000).  Each entry carries the handler, the expected
 * (or minimum, with HCI_MGMT_VAR_LEN) parameter size, and flags:
 * HCI_MGMT_NO_HDEV (no controller index), HCI_MGMT_UNTRUSTED (allowed
 * for untrusted sockets), HCI_MGMT_UNCONFIGURED (valid on unconfigured
 * controllers), HCI_MGMT_HDEV_OPTIONAL (index optional).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9307
mgmt_index_added(struct hci_dev * hdev)9308 void mgmt_index_added(struct hci_dev *hdev)
9309 {
9310 struct mgmt_ev_ext_index ev;
9311
9312 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9313 return;
9314
9315 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9316 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9317 HCI_MGMT_UNCONF_INDEX_EVENTS);
9318 ev.type = 0x01;
9319 } else {
9320 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9321 HCI_MGMT_INDEX_EVENTS);
9322 ev.type = 0x00;
9323 }
9324
9325 ev.bus = hdev->bus;
9326
9327 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9328 HCI_MGMT_EXT_INDEX_EVENTS);
9329 }
9330
/* Announce removal of a controller index, fail all commands still
 * pending against it, and cancel the controller's timed MGMT work.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are invisible to the management interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	/* Opcode 0 matches every pending command. */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}
9363
/* Called when a power-on attempt finishes: on success restore LE actions
 * and passive scanning, then answer every pending SET_POWERED command
 * and emit the new settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* settings_rsp stashes one requester socket in match.sk. */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9386
/* Power-off bookkeeping: answer pending SET_POWERED commands, fail all
 * other pending commands with a status that reflects why power is going
 * away, clear the published class of device, and emit new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce a class-of-device change if it was non-zero. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9420
mgmt_set_powered_failed(struct hci_dev * hdev,int err)9421 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9422 {
9423 struct mgmt_pending_cmd *cmd;
9424 u8 status;
9425
9426 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9427 if (!cmd)
9428 return;
9429
9430 if (err == -ERFKILL)
9431 status = MGMT_STATUS_RFKILLED;
9432 else
9433 status = MGMT_STATUS_FAILED;
9434
9435 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9436
9437 mgmt_pending_remove(cmd);
9438 }
9439
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key;
 * @persistent tells userspace whether to store it.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev = { .store_hint = persistent };

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
9456
mgmt_ltk_type(struct smp_ltk * ltk)9457 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9458 {
9459 switch (ltk->type) {
9460 case SMP_LTK:
9461 case SMP_LTK_RESPONDER:
9462 if (ltk->authenticated)
9463 return MGMT_LTK_AUTHENTICATED;
9464 return MGMT_LTK_UNAUTHENTICATED;
9465 case SMP_LTK_P256:
9466 if (ltk->authenticated)
9467 return MGMT_LTK_P256_AUTH;
9468 return MGMT_LTK_P256_UNAUTH;
9469 case SMP_LTK_P256_DEBUG:
9470 return MGMT_LTK_P256_DEBUG;
9471 }
9472
9473 return MGMT_LTK_UNAUTHENTICATED;
9474 }
9475
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a freshly distributed LE LTK,
 * deciding whether userspace should persist it.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks a key distributed while we were the initiator. */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9518
/* Emit the New Identity Resolving Key event, reporting both the RPA the
 * remote is currently using and its identity address plus IRK value.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	/* Zero-fill first so padding never leaks to userspace */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent ? 0x01 : 0x00;

	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
	bacpy(&ev.rpa, &irk->rpa);

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
9534
/* Emit the New Signature Resolving Key event for a distributed CSRK.
 * store_hint tells userspace whether the key is worth persisting.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	/* Clear the event so unset fields and padding are zero */
	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	/* (b[5] & 0xc0) == 0xc0 identifies a static random address */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9564
/* Emit the New Connection Parameter event so userspace can persist
 * connection parameters suggested by the remote device. Parameters for
 * non-identity (resolvable/non-resolvable random) addresses are dropped
 * since they cannot be associated with a stable device.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;

	/* All interval/timing fields go out little-endian on the wire */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9585
/* Emit the Device Connected management event for a new connection.
 *
 * The HCI_CONN_MGMT_CONNECTED flag guarantees the event is sent at most
 * once per connection. For LE connections the cached advertising data is
 * attached verbatim; for BR/EDR the remote name (if known) and the class
 * of device are packed as EIR fields instead.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	/* Only notify once per connection */
	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* allocate buff for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	/* mgmt_alloc_skb() can fail; skb_put() on NULL would crash. The
	 * other event builders in this file (mesh_device_found,
	 * mgmt_device_found) already guard against this.
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		/* Only report a class of device that is actually set */
		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}
9635
disconnect_rsp(struct mgmt_pending_cmd * cmd,void * data)9636 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9637 {
9638 struct sock **sk = data;
9639
9640 cmd->cmd_complete(cmd, 0);
9641
9642 *sk = cmd->sk;
9643 sock_hold(*sk);
9644
9645 mgmt_pending_remove(cmd);
9646 }
9647
/* mgmt_pending_foreach() callback: emit the Device Unpaired event to all
 * sockets except the command's sender, then complete the pending
 * Unpair Device command itself.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9658
mgmt_powering_down(struct hci_dev * hdev)9659 bool mgmt_powering_down(struct hci_dev *hdev)
9660 {
9661 struct mgmt_pending_cmd *cmd;
9662 struct mgmt_mode *cp;
9663
9664 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9665 if (!cmd)
9666 return false;
9667
9668 cp = cmd->param;
9669 if (!cp->val)
9670 return true;
9671
9672 return false;
9673 }
9674
/* Emit the Device Disconnected management event.
 *
 * Completes any pending Disconnect command first (its sender is then
 * excluded from the event broadcast) and finally flushes any pending
 * Unpair Device commands for this controller.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* Only report devices that were announced as connected via mgmt */
	if (!mgmt_connected)
		return;

	/* Only ACL and LE links are exposed through the mgmt interface */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() stores the command's socket in sk (with a ref) */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9706
/* Complete a pending Disconnect command with a failure status when the
 * HCI disconnect attempt failed. Pending Unpair Device commands are
 * flushed regardless of whether a Disconnect is pending.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_disconnect *cp;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	/* Only complete the command if it targeted this exact address */
	cp = cmd->param;
	if (bacmp(bdaddr, &cp->addr.bdaddr) != 0)
		return;

	if (cp->addr.type != link_to_bdaddr(link_type, addr_type))
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9732
/* Emit the Connect Failed event with the HCI error translated to a mgmt
 * status code.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	ev.status = mgmt_status(status);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9744
/* Emit the PIN Code Request event. PIN code pairing is BR/EDR only,
 * hence the fixed BDADDR_BREDR address type.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	ev.secure = secure;
	ev.addr.type = BDADDR_BREDR;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
9755
/* Complete a pending PIN Code Reply command with the translated status */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9768
/* Complete a pending PIN Code Negative Reply command with the translated
 * status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9781
/* Emit the User Confirmation Request event asking userspace to confirm
 * the numeric comparison value for the given remote device.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.value = cpu_to_le32(value);
	ev.confirm_hint = confirm_hint;
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9798
/* Emit the User Passkey Request event asking userspace to supply a
 * passkey for the given remote device.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9812
/* Common completion helper for the user confirm/passkey (negative) reply
 * commands: finish the pending command identified by opcode with the
 * translated status. Returns -ENOENT when no such command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
	return 0;
}
9828
/* Complete a pending User Confirmation Reply command */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9835
/* Complete a pending User Confirmation Negative Reply command */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9843
/* Complete a pending User Passkey Reply command */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9850
/* Complete a pending User Passkey Negative Reply command */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9858
/* Emit the Passkey Notify event so userspace can display the passkey
 * (and the count of digits entered so far) to the user.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.entered = entered;
	ev.passkey = __cpu_to_le32(passkey);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
9874
/* Emit the Authentication Failed event for a connection and, if a Pair
 * Device command is pending for it, complete that command with the same
 * status (skipping the command's own socket in the broadcast).
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* The pairing initiator gets the status via the command response
	 * below, so its socket is excluded from the event broadcast.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9895
/* Handle completion of the HCI Write Authentication Enable command:
 * answer all pending Set Link Security commands and, if the setting
 * actually changed, broadcast New Settings.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		/* Fail every pending Set Link Security command */
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Sync the HCI_LINK_SECURITY flag with the controller state;
	 * "changed" is true only when the flag actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp() takes a reference on the first command's socket */
	if (match.sk)
		sock_put(match.sk);
}
9922
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)9923 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9924 {
9925 struct cmd_lookup *match = data;
9926
9927 if (match->sk == NULL) {
9928 match->sk = cmd->sk;
9929 sock_hold(match->sk);
9930 }
9931 }
9932
/* Handle completion of a class-of-device update: on success broadcast
 * the Class Of Device Changed event (3-byte CoD) to interested sockets,
 * skipping the socket of whichever command triggered the change.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have triggered the CoD update;
	 * sk_lookup() grabs the first pending command's socket.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	/* Drop the reference sk_lookup() took */
	if (match.sk)
		sock_put(match.sk);
}
9951
/* Handle completion of a local name change: broadcast the Local Name
 * Changed event (skipping the requesting socket, if any). When the name
 * change did not come from a mgmt command, also update the cached name;
 * during power-on no events are emitted at all.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name changed outside of mgmt (e.g. via HCI); keep the
		 * cached copy in sync.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9979
has_uuid(u8 * uuid,u16 uuid_count,u8 (* uuids)[16])9980 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9981 {
9982 int i;
9983
9984 for (i = 0; i < uuid_count; i++) {
9985 if (!memcmp(uuid, uuids[i], 16))
9986 return true;
9987 }
9988
9989 return false;
9990 }
9991
/* Walk the EIR/advertising data and return true if any advertised UUID
 * (16-, 32- or 128-bit, expanded to 128-bit form) matches an entry in
 * the uuids filter list.
 *
 * EIR is a sequence of [length][type][payload] fields where the length
 * byte covers the type byte plus payload, but not itself.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field rather than reading past the
		 * end of the buffer.
		 */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Expand each 16-bit UUID (little-endian at offset
			 * i+2) into the Bluetooth Base UUID at bytes 12-13.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs occupy bytes 12-15 of the base UUID */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are copied verbatim */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + field contents) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10046
/* Schedule a restart of the ongoing LE scan (used with controllers that
 * apply strict duplicate filtering and would otherwise suppress updated
 * results).
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Don't bother if the restart would only land after the current
	 * scan window (scan_start + scan_duration) has already expired.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
10061
/* Apply the Start Service Discovery result filter (RSSI threshold and
 * UUID list) to a discovery result. Returns true when the result should
 * be reported to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		/* Both advertising data and scan response are searched */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10106
/* Emit the Advertisement Monitor Device Lost event for the monitor
 * identified by handle.
 */
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.addr.type = addr_type;
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}
10119
/* Send an Advertisement Monitor Device Found event built from an already
 * assembled Device Found event skb, prefixing it with the matched
 * monitor's handle. The original skb is left untouched (only copied).
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* The monitor event is the device-found payload plus the extra
	 * fields of mgmt_ev_adv_monitor_device_found (i.e. the handle).
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10149
/* Route a fully built Device Found skb to the right event(s): a plain
 * DEVICE_FOUND broadcast, one ADV_MONITOR_DEVICE_FOUND per newly matched
 * monitor, or both. Consumes the skb (sent or freed) in all paths.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below: stays false only if every monitored device has
	 * already been notified.
	 */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* First report for this monitored device */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Either broadcast the skb or drop it; it is consumed either way */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10213
/* Emit the Mesh Device Found event for an LE advertising report, but
 * only if the report carries at least one AD type from the configured
 * mesh_ad_types filter (an empty filter accepts everything).
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* An unset first entry means no AD type filter is configured */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	/* eir[i] is each field's length byte, eir[i + 1] its AD type;
	 * the filter list is terminated by its first zero entry.
	 */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	/* Repeat the same AD type scan over the scan response data */
	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: drop the report silently */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10279
/* Build and route a Device Found event from an inquiry result or LE
 * advertising report, applying discovery filters (service discovery,
 * limited discovery) before reporting.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Mesh mode additionally mirrors LE reports as Mesh Device Found */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Append the class of device as an EIR field if it is not already
	 * present in the advertising/inquiry data.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Dispatch to DEVICE_FOUND and/or ADV_MONITOR_DEVICE_FOUND;
	 * consumes the skb.
	 */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10371
/* Emit a Device Found event carrying the result of a remote name
 * request: the resolved name as an EIR Complete Name field, or the
 * NAME_REQUEST_FAILED flag when resolution failed.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
	/* mgmt_alloc_skb() can fail; skb_put() on NULL would crash, so
	 * bail out like the other event builders in this file do.
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}
10398
/* Emit the Discovering event reflecting the current discovery type and
 * whether discovery has started (1) or stopped (0).
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.discovering = discovering;
	ev.type = hdev->discovery.type;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
10411
/* Emit the Controller Suspend event with the reached suspend state */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev = {
		.suspend_state = state,
	};

	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
10419
/* Emit the Controller Resume event. When resume was triggered by a
 * remote device, its address is included; otherwise the address field
 * is zeroed.
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;

	if (!bdaddr) {
		memset(&ev.addr, 0, sizeof(ev.addr));
	} else {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
10435
/* Registration descriptor for the mgmt control channel: command
 * dispatch table plus the per-hdev init hook.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10442
/* Register the mgmt control channel with the HCI socket layer */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10447
/* Unregister the mgmt control channel on module teardown */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10452
mgmt_cleanup(struct sock * sk)10453 void mgmt_cleanup(struct sock *sk)
10454 {
10455 struct mgmt_mesh_tx *mesh_tx;
10456 struct hci_dev *hdev;
10457
10458 read_lock(&hci_dev_list_lock);
10459
10460 list_for_each_entry(hdev, &hci_dev_list, list) {
10461 do {
10462 mesh_tx = mgmt_mesh_next(hdev, sk);
10463
10464 if (mesh_tx)
10465 mesh_send_complete(hdev, mesh_tx, true);
10466 } while (mesh_tx);
10467 }
10468
10469 read_unlock(&hci_dev_list_lock);
10470 }
10471