1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43
44 #define MGMT_VERSION 1
45 #define MGMT_REVISION 22
46
/* Opcodes a trusted (privileged) management socket is allowed to issue.
 * Reported to userspace by read_commands() in the Read Management
 * Commands reply.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
137
/* Events a trusted management socket may receive. Reported to userspace
 * by read_commands() alongside mgmt_commands[].
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184
/* Read-only subset of opcodes permitted for non-trusted sockets
 * (those without HCI_SOCK_TRUSTED); see read_commands().
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197
/* Events that may be delivered to non-trusted sockets; subset of
 * mgmt_events[], see read_commands().
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212
213 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
214
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 "\x00\x00\x00\x00\x00\x00\x00\x00"
217
218 /* HCI to MGMT error code conversion table */
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code (mgmt_status() bounds-checks
 * the index against ARRAY_SIZE before lookup); each trailing comment
 * names the HCI error the entry translates.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285
mgmt_errno_status(int err)286 static u8 mgmt_errno_status(int err)
287 {
288 switch (err) {
289 case 0:
290 return MGMT_STATUS_SUCCESS;
291 case -EPERM:
292 return MGMT_STATUS_REJECTED;
293 case -EINVAL:
294 return MGMT_STATUS_INVALID_PARAMS;
295 case -EOPNOTSUPP:
296 return MGMT_STATUS_NOT_SUPPORTED;
297 case -EBUSY:
298 return MGMT_STATUS_BUSY;
299 case -ETIMEDOUT:
300 return MGMT_STATUS_AUTH_FAILED;
301 case -ENOMEM:
302 return MGMT_STATUS_NO_RESOURCES;
303 case -EISCONN:
304 return MGMT_STATUS_ALREADY_CONNECTED;
305 case -ENOTCONN:
306 return MGMT_STATUS_DISCONNECTED;
307 }
308
309 return MGMT_STATUS_FAILED;
310 }
311
mgmt_status(int err)312 static u8 mgmt_status(int err)
313 {
314 if (err < 0)
315 return mgmt_errno_status(err);
316
317 if (err < ARRAY_SIZE(mgmt_status_table))
318 return mgmt_status_table[err];
319
320 return MGMT_STATUS_FAILED;
321 }
322
/* Broadcast an index-related event on the control channel to all sockets
 * matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329
/* Send an event on the control channel to sockets matching @flag,
 * excluding @skip_sk (typically the command's originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336
/* Send an event on the control channel to trusted sockets only,
 * excluding @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343
/* Send a pre-built event skb on the control channel to trusted sockets,
 * excluding @skip_sk. Ownership of @skb passes to mgmt_send_event_skb().
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349
/* Map a MGMT LE address type onto the HCI address type; MGMT only
 * distinguishes public vs. random.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return mgmt_addr_type == BDADDR_LE_PUBLIC ? ADDR_LE_DEV_PUBLIC :
						    ADDR_LE_DEV_RANDOM;
}
357
/* Fill a struct mgmt_rp_read_version with the interface version and
 * revision this kernel implements. Exposed (non-static) so the HCI
 * socket code can reuse it.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
365
/* Handle MGMT_OP_READ_VERSION: reply with the management interface
 * version/revision. @data and @data_len are unused (no parameters).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378
/* Handle MGMT_OP_READ_COMMANDS: reply with the list of supported command
 * opcodes followed by the list of supported event codes. Non-trusted
 * sockets only see the read-only subsets.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *cmds, *evts;
	u16 num_commands, num_events;
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pick the tables once; the fill loops below are then common. */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		cmds = mgmt_commands;
		num_commands = ARRAY_SIZE(mgmt_commands);
		evts = mgmt_events;
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		cmds = mgmt_untrusted_commands;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		evts = mgmt_untrusted_events;
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Opcodes and event codes are packed back to back after the
	 * fixed header; the buffer may be unaligned on the wire.
	 */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(cmds[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(evts[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
430
/* Handle MGMT_OP_READ_INDEX_LIST: reply with the ids of all configured
 * controllers. Two passes are made under hci_dev_list_lock: the first
 * sizes the reply buffer, the second (with a stricter filter, so the
 * buffer can only be over-sized, never overrun) fills it.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating with the device list read-locked. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: record the indexes that are actually visible. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute: the second pass may have skipped some devices. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
488
/* Handle MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list(), but
 * reports only controllers still in the HCI_UNCONFIGURED state. Same
 * two-pass size/fill pattern under hci_dev_list_lock.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating with the device list read-locked. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: record only the visible unconfigured devices. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute: the second pass may have skipped some devices. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546
/* Handle MGMT_OP_READ_EXT_INDEX_LIST: reply with one entry per visible
 * controller carrying its index, bus type and configured/unconfigured
 * state. Calling this also switches the socket from the legacy index
 * events to the extended ones.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound (all devices, filtered below). */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	/* GFP_ATOMIC: allocating with the device list read-locked. */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		/* Entry type: 0x00 = configured, 0x01 = unconfigured. */
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
612
is_configured(struct hci_dev * hdev)613 static bool is_configured(struct hci_dev *hdev)
614 {
615 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
616 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 return false;
618
619 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
620 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
621 !bacmp(&hdev->public_addr, BDADDR_ANY))
622 return false;
623
624 return true;
625 }
626
get_missing_options(struct hci_dev * hdev)627 static __le32 get_missing_options(struct hci_dev *hdev)
628 {
629 u32 options = 0;
630
631 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
632 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
633 options |= MGMT_OPTION_EXTERNAL_CONFIG;
634
635 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
636 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
637 !bacmp(&hdev->public_addr, BDADDR_ANY))
638 options |= MGMT_OPTION_PUBLIC_ADDRESS;
639
640 return cpu_to_le32(options);
641 }
642
/* Broadcast a New Configuration Options event with the currently missing
 * options to sockets that opted in, excluding @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
650
/* Complete @opcode for @sk with the missing-options bitmask as the
 * command reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
658
/* Handle MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer id plus
 * the supported and still-missing configuration option bitmasks.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* memset: rp goes out on the wire, so clear any padding too. */
	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Public address can only be configured if the driver provides
	 * a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
686
/* Build the bitmask of PHYs this controller supports, derived from its
 * LMP features (BR/EDR packet types and slot counts) and LE features
 * (1M always, 2M/Coded when advertised in le_features).
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for BR/EDR. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 3M capability is nested: it requires EDR 2M. */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE controller. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
738
/* Build the bitmask of currently selected PHYs. For BR/EDR the EDR bits
 * in hdev->pkt_type are "disabled" flags, hence the inverted (!) tests;
 * for LE the default TX/RX PHY masks are consulted directly.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot cannot be deselected. */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits set in pkt_type mean "do not use". */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
801
get_configurable_phys(struct hci_dev * hdev)802 static u32 get_configurable_phys(struct hci_dev *hdev)
803 {
804 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
805 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
806 }
807
get_supported_settings(struct hci_dev * hdev)808 static u32 get_supported_settings(struct hci_dev *hdev)
809 {
810 u32 settings = 0;
811
812 settings |= MGMT_SETTING_POWERED;
813 settings |= MGMT_SETTING_BONDABLE;
814 settings |= MGMT_SETTING_DEBUG_KEYS;
815 settings |= MGMT_SETTING_CONNECTABLE;
816 settings |= MGMT_SETTING_DISCOVERABLE;
817
818 if (lmp_bredr_capable(hdev)) {
819 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
820 settings |= MGMT_SETTING_FAST_CONNECTABLE;
821 settings |= MGMT_SETTING_BREDR;
822 settings |= MGMT_SETTING_LINK_SECURITY;
823
824 if (lmp_ssp_capable(hdev)) {
825 settings |= MGMT_SETTING_SSP;
826 }
827
828 if (lmp_sc_capable(hdev))
829 settings |= MGMT_SETTING_SECURE_CONN;
830
831 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
832 &hdev->quirks))
833 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
834 }
835
836 if (lmp_le_capable(hdev)) {
837 settings |= MGMT_SETTING_LE;
838 settings |= MGMT_SETTING_SECURE_CONN;
839 settings |= MGMT_SETTING_PRIVACY;
840 settings |= MGMT_SETTING_STATIC_ADDRESS;
841 settings |= MGMT_SETTING_ADVERTISING;
842 }
843
844 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
845 hdev->set_bdaddr)
846 settings |= MGMT_SETTING_CONFIGURATION;
847
848 if (cis_central_capable(hdev))
849 settings |= MGMT_SETTING_CIS_CENTRAL;
850
851 if (cis_peripheral_capable(hdev))
852 settings |= MGMT_SETTING_CIS_PERIPHERAL;
853
854 settings |= MGMT_SETTING_PHY_CONFIGURATION;
855
856 return settings;
857 }
858
/* Build the Current Settings bitmask from the hdev flag state; this is
 * the runtime counterpart of get_supported_settings().
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}
938
/* Look up a pending management command for @opcode on the control
 * channel; returns NULL when none is queued.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
943
mgmt_get_adv_discov_flags(struct hci_dev * hdev)944 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
945 {
946 struct mgmt_pending_cmd *cmd;
947
948 /* If there's a pending mgmt command the flags will not yet have
949 * their final values, so check for this first.
950 */
951 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
952 if (cmd) {
953 struct mgmt_mode *cp = cmd->param;
954 if (cp->val == 0x01)
955 return LE_AD_GENERAL;
956 else if (cp->val == 0x02)
957 return LE_AD_LIMITED;
958 } else {
959 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
960 return LE_AD_LIMITED;
961 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
962 return LE_AD_GENERAL;
963 }
964
965 return 0;
966 }
967
mgmt_get_connectable(struct hci_dev * hdev)968 bool mgmt_get_connectable(struct hci_dev *hdev)
969 {
970 struct mgmt_pending_cmd *cmd;
971
972 /* If there's a pending mgmt command the flag will not yet have
973 * it's final value, so check for this first.
974 */
975 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
976 if (cmd) {
977 struct mgmt_mode *cp = cmd->param;
978
979 return cp->val;
980 }
981
982 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
983 }
984
/* hci_cmd_sync callback: push the (possibly stale) EIR data and class
 * of device to the controller. Always succeeds.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
992
service_cache_off(struct work_struct * work)993 static void service_cache_off(struct work_struct *work)
994 {
995 struct hci_dev *hdev = container_of(work, struct hci_dev,
996 service_cache.work);
997
998 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
999 return;
1000
1001 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1002 }
1003
rpa_expired_sync(struct hci_dev * hdev,void * data)1004 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1005 {
1006 /* The generation of a new RPA and programming it into the
1007 * controller happens in the hci_req_enable_advertising()
1008 * function.
1009 */
1010 if (ext_adv_capable(hdev))
1011 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1012 else
1013 return hci_enable_advertising_sync(hdev);
1014 }
1015
/* Delayed work handler for RPA expiry: mark the RPA as expired and, if
 * advertising is active, queue the regeneration via rpa_expired_sync().
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to refresh when advertising is off; the next enable
	 * will pick up a fresh RPA because of the flag set above.
	 */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1030
1031 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1032
/* Delayed-work handler for hdev->discov_off: the discoverable timeout
 * fired, so clear the discoverable flags, queue a controller update and
 * broadcast the changed settings. Runs under hdev->lock.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1057
1058 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1059
/* Finish a mesh transmission: emit the Mesh Packet Complete event for
 * its handle (unless @silent) and release the TX entry.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1071
/* hci_cmd_sync callback: the mesh send window ended, so stop
 * advertising and complete the current (head) mesh TX entry, if any.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1085
1086 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1087 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion handler for mesh_send_done_sync(): start the next queued
 * mesh transmission; if queueing fails, complete it (non-silently) so
 * the entry is not leaked.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1103
/* Delayed-work handler for hdev->mesh_send_done: tear down the current
 * mesh send window. No-op if no mesh transmission is in flight.
 */
static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
1114
/* One-time switch of @hdev into mgmt-controlled mode: set up the
 * delayed work items and adjust the default flags. Idempotent —
 * subsequent calls return early once HCI_MGMT is set.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1136
/* Handle the Read Controller Information mgmt command: reply with the
 * device address, HCI version/manufacturer, settings masks, class of
 * device and names. Snapshot is taken under hdev->lock.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1166
/* Append class of device (BR/EDR only), appearance (LE only) and the
 * complete/short local names to @eir. Returns the number of bytes
 * written. NOTE(review): no bounds checking here — callers must size
 * @eir for the worst case (both current callers use 512-byte buffers).
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1190
/* Handle the Read Extended Controller Information mgmt command.
 *
 * Builds a mgmt_rp_read_ext_info reply (address, version, settings)
 * followed by a variable-length EIR blob (class of device, appearance,
 * local names), then switches this socket over to the extended info
 * event model: further class/name change events are suppressed in
 * favour of Extended Controller Information Changed.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pass the array itself (not its address) for consistency with
	 * ext_info_changed(); the zeroed size is identical either way.
	 */
	memset(buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1230
/* Broadcast the Extended Controller Information Changed event (new EIR
 * snapshot) to all sockets that opted in via Read Extended Controller
 * Information, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1246
/* Complete a settings-style mgmt command on @sk with the device's
 * current settings bitmask as the response payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings;

	settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1254
/* Emit the Advertising Added event for @instance, skipping @sk (the
 * socket that triggered the change).
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = { .instance = instance };

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1263
/* Emit the Advertising Removed event for @instance, skipping @sk (the
 * socket that triggered the change).
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = { .instance = instance };

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1273
cancel_adv_timeout(struct hci_dev * hdev)1274 static void cancel_adv_timeout(struct hci_dev *hdev)
1275 {
1276 if (hdev->adv_instance_timeout) {
1277 hdev->adv_instance_timeout = 0;
1278 cancel_delayed_work(&hdev->adv_instance_expire);
1279 }
1280 }
1281
/* This function requires the caller holds hdev->lock */
/* Re-arm the LE auto-connection actions for all known connection
 * parameters: each entry is removed from whatever pending list it was
 * on and re-added according to its auto_connect policy.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1306
new_settings(struct hci_dev * hdev,struct sock * skip)1307 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1308 {
1309 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1310
1311 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1312 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1313 }
1314
/* Completion handler for Set Powered: on success respond with the new
 * settings (and, for power-on, restart LE actions and broadcast New
 * Settings); on failure return a status response.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1350
set_powered_sync(struct hci_dev * hdev,void * data)1351 static int set_powered_sync(struct hci_dev *hdev, void *data)
1352 {
1353 struct mgmt_pending_cmd *cmd = data;
1354 struct mgmt_mode *cp = cmd->param;
1355
1356 BT_DBG("%s", hdev->name);
1357
1358 return hci_set_powered_sync(hdev, cp->val);
1359 }
1360
/* Handle the Set Powered mgmt command: validate the parameter, reject
 * if another Set Powered is pending, short-circuit if the state is
 * already as requested, otherwise queue the power change with
 * mgmt_set_powered_complete() as the completion handler.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		/* Already in the requested state: just echo the settings */
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1411
/* Broadcast the New Settings event to all mgmt sockets (no skip). */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1416
/* Context passed to mgmt_pending_foreach() callbacks: records the
 * first responder socket (held via sock_hold) so that broadcast events
 * can skip it, plus the device and an optional status to report.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1422
/* mgmt_pending_foreach() callback: answer a pending settings command
 * with the current settings, unlink and free it, and remember the
 * first socket seen in the cmd_lookup so the caller can skip it when
 * broadcasting New Settings.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		/* Reference is dropped by the caller via sock_put() */
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1438
cmd_status_rsp(struct mgmt_pending_cmd * cmd,void * data)1439 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1440 {
1441 u8 *status = data;
1442
1443 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1444 mgmt_pending_remove(cmd);
1445 }
1446
/* mgmt_pending_foreach() callback: complete a pending command through
 * its dedicated cmd_complete handler when one is set, otherwise fall
 * back to a plain status response.
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}
1460
/* Default cmd_complete handler: echo the command's own parameters back
 * as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct sock *sk = cmd->sk;

	return mgmt_cmd_complete(sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1466
/* cmd_complete handler for address-based commands: respond with only
 * the leading mgmt_addr_info portion of the command parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	u16 rsp_len = sizeof(struct mgmt_addr_info);

	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, rsp_len);
}
1472
mgmt_bredr_support(struct hci_dev * hdev)1473 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1474 {
1475 if (!lmp_bredr_capable(hdev))
1476 return MGMT_STATUS_NOT_SUPPORTED;
1477 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1478 return MGMT_STATUS_REJECTED;
1479 else
1480 return MGMT_STATUS_SUCCESS;
1481 }
1482
mgmt_le_support(struct hci_dev * hdev)1483 static u8 mgmt_le_support(struct hci_dev *hdev)
1484 {
1485 if (!lmp_le_capable(hdev))
1486 return MGMT_STATUS_NOT_SUPPORTED;
1487 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1488 return MGMT_STATUS_REJECTED;
1489 else
1490 return MGMT_STATUS_SUCCESS;
1491 }
1492
/* Completion handler for Set Discoverable: on failure report the
 * status and clear the limited flag; on success arm the discoverable
 * timeout (if any), respond with the settings and broadcast New
 * Settings.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the timeout now that the mode change actually took effect */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1526
set_discoverable_sync(struct hci_dev * hdev,void * data)1527 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1528 {
1529 BT_DBG("%s", hdev->name);
1530
1531 return hci_update_discoverable_sync(hdev);
1532 }
1533
/* Handle the Set Discoverable mgmt command.
 *
 * val: 0x00 = off (timeout must be 0), 0x01 = general discoverable,
 * 0x02 = limited discoverable (timeout required). Fast paths handle
 * the powered-off case (flags only) and a pure timeout update; the
 * full path records the new flags and queues the controller update,
 * with the timeout armed in mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable is only valid while connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1666
/* Completion handler for Set Connectable: report the result to the
 * issuing socket and, on success, broadcast the new settings.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	/* NOTE(review): cmd is dereferenced unconditionally above, so
	 * this NULL check looks redundant — confirm before removing.
	 */
	if (cmd)
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
1695
/* Powered-off path for Set Connectable: update only the flags (turning
 * connectable off also drops discoverable), send the settings response
 * and, when anything changed, refresh scanning and broadcast New
 * Settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1724
set_connectable_sync(struct hci_dev * hdev,void * data)1725 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1726 {
1727 BT_DBG("%s", hdev->name);
1728
1729 return hci_update_connectable_sync(hdev);
1730 }
1731
/* Handle the Set Connectable mgmt command: validate, take the
 * flags-only path when powered off, reject while a conflicting command
 * is pending, otherwise record the new flags (dropping discoverable
 * state on disable) and queue the controller update.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable also ends discoverable mode */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1791
/* Handle the Set Bondable mgmt command: toggle HCI_BONDABLE, answer
 * with the current settings and, if the flag actually changed, refresh
 * discoverable state and broadcast New Settings. No HCI traffic is
 * needed for the flag itself.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1829
/* Handle the Set Link Security mgmt command: when powered off only the
 * flag is toggled; when powered, send HCI Write Auth Enable and leave
 * the pending command to be completed by the HCI event handler.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested auth state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1898
/* Completion handler for Set SSP: on error roll back the flag (when it
 * was being enabled) and fail all pending Set SSP commands; on success
 * update the flag, answer all pending commands, broadcast New Settings
 * if the state changed and refresh the EIR data.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1940
/* hci_cmd_sync callback for Set SSP: optimistically set the flag when
 * enabling, write the SSP mode to the controller, and undo the flag if
 * the write failed and this call was the one that set it.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* Roll back the optimistic flag set on failure */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1958
/* Handle the Set SSP mgmt command: validate support and parameters,
 * take the flags-only path when powered off, and otherwise queue the
 * controller write with set_ssp_complete() as completion handler.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2033
/* Handle the Set High Speed mgmt command: unconditionally rejected
 * with NOT_SUPPORTED.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	bt_dev_dbg(hdev, "sock %p", sk);

	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
			       MGMT_STATUS_NOT_SUPPORTED);
}
2041
/* Completion handler for Set LE: on error fail all pending Set LE
 * commands; on success answer them with the settings and broadcast
 * New Settings, skipping the first responder socket.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
2062
/* hci_cmd_sync callback for Set LE: when disabling, tear down all
 * advertising first; when enabling, set the flag. Then write LE host
 * support and, if LE ended up enabled, refresh the advertising data
 * and passive scanning.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2106
/* Completion callback for the Set Mesh Receiver command queued in
 * set_mesh().
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		/* Failure: report the status to every pending
		 * SET_MESH_RECEIVER command. NOTE(review): this path does
		 * not remove @cmd directly — it relies on cmd_status_rsp
		 * disposing of each pending entry; confirm against its
		 * definition.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Success: grab sk before the remove invalidates cmd, then ack */
	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2122
/* hci_cmd_sync work for MGMT_OP_SET_MESH_RECEIVER: toggle the HCI_MESH
 * flag, install the caller's AD-type filter list and refresh passive
 * scanning.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	/* Remaining bytes after the fixed header are the AD-type filter
	 * list. Assumes param_len >= sizeof(*cp) (presumably guaranteed
	 * by the mgmt core's minimum-size check) — TODO confirm, since
	 * an underflow here would make len huge and skip the copy below.
	 */
	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2145
/* MGMT_OP_SET_MESH_RECEIVER handler: validate the request and queue
 * set_mesh_sync() on the cmd_sync queue; set_mesh_complete() replies.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Mesh requires LE and the experimental mesh feature enabled */
	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		/* Covers both allocation failure and queueing failure;
		 * cmd is only non-NULL in the latter case.
		 */
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2183
/* Completion callback for mesh_send_sync(): on failure report the
 * error for this transmission, on success schedule the send-done work
 * that will finish the transmission once it has been advertised.
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	/* Allow 25 ms per requested transmission before declaring the
	 * send finished.
	 */
	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2204
/* hci_cmd_sync work for MGMT_OP_MESH_SEND: register a short-lived
 * advertising instance carrying the mesh packet and schedule it.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	/* Mesh uses an instance number one beyond the controller's
	 * normal advertising set range so it cannot collide with
	 * regular instances.
	 */
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* All regular advertising instances in use.
	 * NOTE(review): this returns a positive MGMT status where sync
	 * callbacks elsewhere return a negative errno; confirm
	 * mesh_send_start_complete()/mgmt_status() handle it as intended.
	 */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	/* instance == 0 means nothing to schedule explicitly here */
	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2258
send_count(struct mgmt_mesh_tx * mesh_tx,void * data)2259 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2260 {
2261 struct mgmt_rp_mesh_read_features *rp = data;
2262
2263 if (rp->used_handles >= rp->max_handles)
2264 return;
2265
2266 rp->handles[rp->used_handles++] = mesh_tx->handle;
2267 }
2268
/* MGMT_OP_MESH_READ_FEATURES handler: report the maximum number of
 * concurrent mesh transmissions and the handles currently in use by
 * the requesting socket.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* max_handles stays 0 (no mesh capacity) while LE is disabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply to the used portion of the handles array:
	 * full struct minus the MESH_HANDLES_MAX-byte array plus the
	 * used_handles entries actually filled in.
	 */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2295
send_cancel(struct hci_dev * hdev,void * data)2296 static int send_cancel(struct hci_dev *hdev, void *data)
2297 {
2298 struct mgmt_pending_cmd *cmd = data;
2299 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2300 struct mgmt_mesh_tx *mesh_tx;
2301
2302 if (!cancel->handle) {
2303 do {
2304 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2305
2306 if (mesh_tx)
2307 mesh_send_complete(hdev, mesh_tx, false);
2308 } while (mesh_tx);
2309 } else {
2310 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2311
2312 if (mesh_tx && mesh_tx->sk == cmd->sk)
2313 mesh_send_complete(hdev, mesh_tx, false);
2314 }
2315
2316 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2317 0, NULL, 0);
2318 mgmt_pending_free(cmd);
2319
2320 return 0;
2321 }
2322
/* MGMT_OP_MESH_SEND_CANCEL handler: validate and queue send_cancel()
 * on the cmd_sync queue.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	/* mgmt_pending_new (not _add): the entry is not tracked on the
	 * pending list; send_cancel() frees it with mgmt_pending_free.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2356
/* MGMT_OP_MESH_SEND handler: register a new mesh transmission and, if
 * no send is already in progress, kick off mesh_send_sync().
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* Payload must be non-empty and at most 31 bytes (legacy
	 * advertising data size) beyond the fixed header.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Count this socket's outstanding transmissions to enforce the
	 * MESH_HANDLES_MAX limit.
	 */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		/* If a send is already in progress the new entry just
		 * waits its turn; otherwise start it now.
		 */
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		/* NOTE(review): mesh_tx is only removed when another send
		 * was already in progress, yet err < 0 with a valid
		 * mesh_tx can only happen on the !sending queue-failure
		 * path — confirm the queued entry is not leaked here.
		 */
		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		/* Reply with the 1-byte handle assigned to this send */
		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2417
/* MGMT_OP_SET_LE handler: enable or disable Low Energy support.
 *
 * When the change requires talking to the controller, set_le_sync() is
 * queued and set_le_complete() sends the reply; otherwise the reply is
 * sent directly from here.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* If the controller is off, or already in the requested state,
	 * only the flags need updating — no HCI traffic required.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE implicitly disables LE advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against other commands that touch LE state */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2506
2507 /* This is a helper function to test for pending mgmt commands that can
2508 * cause CoD or EIR HCI commands. We can only allow one such pending
2509 * mgmt command at a time since otherwise we cannot easily track what
2510 * the current values are, will be, and based on that calculate if a new
2511 * HCI command needs to be sent and if yes with what value.
2512 */
pending_eir_or_class(struct hci_dev * hdev)2513 static bool pending_eir_or_class(struct hci_dev *hdev)
2514 {
2515 struct mgmt_pending_cmd *cmd;
2516
2517 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2518 switch (cmd->opcode) {
2519 case MGMT_OP_ADD_UUID:
2520 case MGMT_OP_REMOVE_UUID:
2521 case MGMT_OP_SET_DEV_CLASS:
2522 case MGMT_OP_SET_POWERED:
2523 return true;
2524 }
2525 }
2526
2527 return false;
2528 }
2529
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; get_uuid_size() compares the first 12
 * bytes (the fixed tail) to decide whether a UUID can be shortened.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2534
get_uuid_size(const u8 * uuid)2535 static u8 get_uuid_size(const u8 *uuid)
2536 {
2537 u32 val;
2538
2539 if (memcmp(uuid, bluetooth_base_uuid, 12))
2540 return 128;
2541
2542 val = get_unaligned_le32(&uuid[12]);
2543 if (val > 0xffff)
2544 return 32;
2545
2546 return 16;
2547 }
2548
mgmt_class_complete(struct hci_dev * hdev,void * data,int err)2549 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2550 {
2551 struct mgmt_pending_cmd *cmd = data;
2552
2553 bt_dev_dbg(hdev, "err %d", err);
2554
2555 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2556 mgmt_status(err), hdev->dev_class, 3);
2557
2558 mgmt_pending_free(cmd);
2559 }
2560
/* hci_cmd_sync work for MGMT_OP_ADD_UUID: push the updated Class of
 * Device and EIR data to the controller.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}
2571
/* MGMT_OP_ADD_UUID handler: record a new service UUID and refresh the
 * controller's Class of Device and EIR data via add_uuid_sync().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	/* Success falls through here too: the reply is sent later from
	 * mgmt_class_complete(), only the unlock remains.
	 */
failed:
	hci_dev_unlock(hdev);
	return err;
}
2621
enable_service_cache(struct hci_dev * hdev)2622 static bool enable_service_cache(struct hci_dev *hdev)
2623 {
2624 if (!hdev_is_powered(hdev))
2625 return false;
2626
2627 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2628 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2629 CACHE_TIMEOUT);
2630 return true;
2631 }
2632
2633 return false;
2634 }
2635
/* hci_cmd_sync work for MGMT_OP_REMOVE_UUID: refresh the Class of
 * Device first, then the EIR data if that succeeded.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2646
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID (or all of them
 * when the all-zero wildcard UUID is given) and refresh CoD/EIR.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was just armed, the HCI
		 * update is deferred — reply immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every stored entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2717
set_class_sync(struct hci_dev * hdev,void * data)2718 static int set_class_sync(struct hci_dev *hdev, void *data)
2719 {
2720 int err = 0;
2721
2722 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2723 cancel_delayed_work_sync(&hdev->service_cache);
2724 err = hci_update_eir_sync(hdev);
2725 }
2726
2727 if (err)
2728 return err;
2729
2730 return hci_update_class_sync(hdev);
2731 }
2732
/* MGMT_OP_SET_DEV_CLASS handler: set major/minor device class and push
 * the update to the controller via set_class_sync() when powered.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Class of Device only exists for BR/EDR */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two minor bits and high three major bits are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: the values take effect at power on, reply now */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2787
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the list supplied by userspace.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Cap on key_count so struct_size() below cannot exceed U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	hci_dev_lock(hdev);

	/* This is a full replace: drop everything currently stored */
	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	/* Invalid entries are skipped with a warning rather than
	 * failing the whole command.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Link keys are a BR/EDR-only concept */
		if (key->addr.type != BDADDR_BREDR) {
			bt_dev_warn(hdev,
				    "Invalid link address type %u for %pMR",
				    key->addr.type, &key->addr.bdaddr);
			continue;
		}

		/* 0x08 is the highest link key type defined here */
		if (key->type > 0x08) {
			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
				    key->type, &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2880
/* Emit the Device Unpaired event for the given address to every mgmt
 * socket except @skip_sk.
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	ev.addr.type = addr_type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2892
unpair_device_complete(struct hci_dev * hdev,void * data,int err)2893 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2894 {
2895 struct mgmt_pending_cmd *cmd = data;
2896 struct mgmt_cp_unpair_device *cp = cmd->param;
2897
2898 if (!err)
2899 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2900
2901 cmd->cmd_complete(cmd, err);
2902 mgmt_pending_free(cmd);
2903 }
2904
unpair_device_sync(struct hci_dev * hdev,void * data)2905 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2906 {
2907 struct mgmt_pending_cmd *cmd = data;
2908 struct mgmt_cp_unpair_device *cp = cmd->param;
2909 struct hci_conn *conn;
2910
2911 if (cp->addr.type == BDADDR_BREDR)
2912 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2913 &cp->addr.bdaddr);
2914 else
2915 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2916 le_addr_type(cp->addr.type));
2917
2918 if (!conn)
2919 return 0;
2920
2921 /* Disregard any possible error since the likes of hci_abort_conn_sync
2922 * will clean up the connection no matter the error.
2923 */
2924 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2925
2926 return 0;
2927 }
2928
/* MGMT_OP_UNPAIR_DEVICE handler: remove all pairing keys for a device
 * and optionally disconnect it.
 *
 * BR/EDR removes the stored link key; LE cancels any ongoing SMP
 * pairing and removes LTK/IRK. If a disconnect was requested and a
 * connection exists, unpair_device_sync() is queued to terminate it
 * and the reply is sent from unpair_device_complete(); otherwise the
 * reply (and the Device Unpaired event) is sent directly.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the address being unpaired */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			/* No stored key means the device was not paired */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop the parameters immediately */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3057
disconnect_complete(struct hci_dev * hdev,void * data,int err)3058 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3059 {
3060 struct mgmt_pending_cmd *cmd = data;
3061
3062 cmd->cmd_complete(cmd, mgmt_status(err));
3063 mgmt_pending_free(cmd);
3064 }
3065
disconnect_sync(struct hci_dev * hdev,void * data)3066 static int disconnect_sync(struct hci_dev *hdev, void *data)
3067 {
3068 struct mgmt_pending_cmd *cmd = data;
3069 struct mgmt_cp_disconnect *cp = cmd->param;
3070 struct hci_conn *conn;
3071
3072 if (cp->addr.type == BDADDR_BREDR)
3073 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3074 &cp->addr.bdaddr);
3075 else
3076 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3077 le_addr_type(cp->addr.type));
3078
3079 if (!conn)
3080 return -ENOTCONN;
3081
3082 /* Disregard any possible error since the likes of hci_abort_conn_sync
3083 * will clean up the connection no matter the error.
3084 */
3085 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3086
3087 return 0;
3088 }
3089
/* MGMT_OP_DISCONNECT handler: validate the request and queue
 * disconnect_sync() on the cmd_sync queue; disconnect_complete()
 * sends the reply.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the address being disconnected */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
				 disconnect_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3135
/* Map an HCI link type plus address type to the corresponding mgmt
 * BDADDR_* address type used on the management interface.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	/* LE based links (LE and ISO) map to the LE address types */
	if (link_type == LE_LINK || link_type == ISO_LINK) {
		if (addr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;

		/* Fallback to LE Random address type */
		return BDADDR_LE_RANDOM;
	}

	/* Fallback to BR/EDR type */
	return BDADDR_BREDR;
}
3155
/* MGMT_OP_GET_CONNECTIONS handler: reply with the addresses of all
 * mgmt-visible connections, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count eligible connections to size the reply.
	 * This may over-count (SCO/eSCO are filtered only below), which
	 * is fine — the reply is trimmed to the final count.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses. A SCO/eSCO entry is
	 * written to slot i but then skipped without incrementing i, so
	 * the slot is reused by the next accepted connection.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3209
/* Send an HCI PIN Code Negative Reply for the given address and track it
 * as a pending mgmt command so the eventual HCI status is routed back to
 * the requesting socket.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	/* If the HCI command could not be sent there will be no completion
	 * event, so drop the pending entry right away.
	 */
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
3230
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user supplied PIN code for an
 * existing BR/EDR (ACL) connection to the controller.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN code pairing only applies to an existing ACL link */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16 byte PIN. Reject anything shorter
	 * by sending a negative reply to the controller and an invalid-params
	 * status back to userspace.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	/* No completion event will arrive if sending failed */
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3292
/* Handle MGMT_OP_SET_IO_CAPABILITY: store the IO capability used for
 * subsequent pairing attempts. Takes effect immediately; no HCI command
 * is needed.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SMP_IO_KEYBOARD_DISPLAY is the highest defined capability value */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
3315
find_pairing(struct hci_conn * conn)3316 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3317 {
3318 struct hci_dev *hdev = conn->hdev;
3319 struct mgmt_pending_cmd *cmd;
3320
3321 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3322 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3323 continue;
3324
3325 if (cmd->user_data != conn)
3326 continue;
3327
3328 return cmd;
3329 }
3330
3331 return NULL;
3332 }
3333
/* Finish a Pair Device command: send the response to userspace, detach
 * the pairing callbacks and release the connection references taken when
 * the command was set up in pair_device().
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drops the reference taken with hci_conn_get() in pair_device() */
	hci_conn_put(conn);

	return err;
}
3362
/* Called by the SMP layer when pairing over SMP has finished; complete
 * any pending Pair Device command tracking this connection.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
3374
/* Connection callback used for BR/EDR pairing: any connect/security/
 * disconnect confirmation ends the pending Pair Device command.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	BT_DBG("status %u", status);

	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3390
/* Connection callback used for LE pairing. Success is ignored here
 * because a completed connection is not proof that pairing finished
 * (mgmt_smp_complete() handles the success path); only failures end
 * the pending command.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3409
/* Handle MGMT_OP_PAIR_DEVICE: initiate pairing with a remote device,
 * creating a BR/EDR or LE connection as needed. Progress is tracked via
 * a pending command whose user_data holds a reference to the connection;
 * pairing_complete() releases it.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the target address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Translate the connect error into a mgmt status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A connect_cfm_cb already set means a pairing attempt is
	 * already using this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released by pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* Already connected and secured: complete right away */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3544
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort an in-progress Pair Device
 * command for the given address, cleaning up any pairing state and
 * tearing down a link that only existed for the pairing attempt.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the device currently being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Complete the pending Pair Device command as cancelled */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3601
/* Common handler for user confirmation/passkey (negative) replies:
 * validate power and connection state, answer directly via SMP for LE,
 * or forward the reply to the controller over HCI for BR/EDR.
 *
 * @mgmt_op: mgmt opcode used in responses to userspace
 * @hci_op:  HCI opcode sent to the controller for BR/EDR links
 * @passkey: only used when @hci_op is HCI_OP_USER_PASSKEY_REPLY
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing is handled entirely by SMP; answer synchronously
	 * without going through HCI.
	 */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	/* No completion event will arrive if sending failed */
	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3672
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY via the common pairing-response
 * helper (no passkey involved).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3684
/* Handle MGMT_OP_USER_CONFIRM_REPLY via the common pairing-response
 * helper. The explicit length check guards against trailing garbage
 * since this command has a fixed-size parameter block.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3700
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY via the common pairing-response
 * helper (rejects a user confirmation request).
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3712
/* Handle MGMT_OP_USER_PASSKEY_REPLY via the common pairing-response
 * helper, passing through the user supplied passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3724
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY via the common pairing-response
 * helper (rejects a passkey request).
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3736
/* Expire the current advertising instance if it carries any of the
 * given mgmt advertising flags, then schedule the next instance so the
 * advertised data gets regenerated.
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* Nothing to do when there is no current instance or it does not
	 * carry any of the affected flags.
	 */
	if (!adv || !(adv->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv = hci_get_next_instance(hdev, adv->instance);
	if (adv)
		hci_schedule_adv_instance_sync(hdev, adv->instance, true);

	return 0;
}
3759
/* hci_cmd_sync callback: expire advertising instances that include the
 * local name so it gets refreshed after a name change.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3764
/* Completion callback for set_name_sync: report the result of the Set
 * Local Name command back to userspace.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the command is no longer the tracked pending one */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* Expire advertising instances carrying the local name */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3789
set_name_sync(struct hci_dev * hdev,void * data)3790 static int set_name_sync(struct hci_dev *hdev, void *data)
3791 {
3792 if (lmp_bredr_capable(hdev)) {
3793 hci_update_name_sync(hdev);
3794 hci_update_eir_sync(hdev);
3795 }
3796
3797 /* The name is stored in the scan response data and so
3798 * no need to update the advertising data here.
3799 */
3800 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3801 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3802
3803 return 0;
3804 }
3805
/* Handle MGMT_OP_SET_LOCAL_NAME: update the complete and short local
 * name. When powered, the change is pushed to the controller via
 * set_name_sync(); otherwise it only updates the stored values and
 * notifies other mgmt sockets.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is never sent to the controller, so it can be
	 * stored unconditionally.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		/* Notify other sockets that opted in to name events */
		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Only update the stored name once the update is queued */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3868
/* hci_cmd_sync callback: expire advertising instances that include the
 * appearance value so it gets refreshed after a change.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3873
/* Handle MGMT_OP_SET_APPEARANCE: store the LE appearance value and, if
 * it changed while advertising, expire instances that include it so the
 * advertised data is regenerated.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 new_appearance = le16_to_cpu(cp->appearance);
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (new_appearance != hdev->appearance) {
		hdev->appearance = new_appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3908
/* Handle MGMT_OP_GET_PHY_CONFIGURATION: report the supported, selected
 * and configurable PHYs to userspace.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* memset also clears any structure padding copied to userspace */
	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3929
/* Broadcast the PHY Configuration Changed event to all mgmt sockets
 * except @skip (the socket that triggered the change).
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3941
/* Completion callback for set_default_phy_sync: derive the final status
 * from the HCI event skb stashed on the pending command and report the
 * result to userspace.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if the command is no longer the tracked pending one */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* Even when the sync callback succeeded, the HCI command itself
	 * may have failed; skb->data[0] carries the HCI status byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3978
set_default_phy_sync(struct hci_dev * hdev,void * data)3979 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3980 {
3981 struct mgmt_pending_cmd *cmd = data;
3982 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3983 struct hci_cp_le_set_default_phy cp_phy;
3984 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3985
3986 memset(&cp_phy, 0, sizeof(cp_phy));
3987
3988 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3989 cp_phy.all_phys |= 0x01;
3990
3991 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3992 cp_phy.all_phys |= 0x02;
3993
3994 if (selected_phys & MGMT_PHY_LE_1M_TX)
3995 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3996
3997 if (selected_phys & MGMT_PHY_LE_2M_TX)
3998 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3999
4000 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4001 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4002
4003 if (selected_phys & MGMT_PHY_LE_1M_RX)
4004 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4005
4006 if (selected_phys & MGMT_PHY_LE_2M_RX)
4007 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4008
4009 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4010 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4011
4012 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4013 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4014
4015 return 0;
4016 }
4017
/* Handle MGMT_OP_SET_PHY_CONFIGURATION: apply BR/EDR packet-type
 * selection immediately and queue an HCI command for the LE default PHY
 * preference when it changed.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller does not support is invalid */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* All non-configurable PHYs must remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to change: reply immediately */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate the selected BR/EDR PHYs into the packet-type bitmap.
	 * Note that the EDR bits are inverted: setting an HCI_2DHx/3DHx
	 * bit in pkt_type DISABLES the corresponding EDR packet type.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE selection is unchanged, only the BR/EDR part needs
	 * handling and the command can complete right away.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4146
/* Handle MGMT_OP_SET_BLOCKED_KEYS: replace the list of blocked keys
 * with the one supplied by userspace.
 *
 * Note: err carries mgmt status codes here (not negative errnos) since
 * it is passed straight to mgmt_cmd_complete().
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Largest key_count that cannot overflow the u16 expected_len */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The new list fully replaces the old one */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		/* On allocation failure the keys added so far are kept */
		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}
4195
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband speech
 * setting. The setting may only be changed while the controller is
 * powered off.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Changing the value on a powered controller is rejected;
	 * re-setting the current value is still allowed.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* test-and-set/clear so 'changed' reflects an actual transition */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4244
/* Handle MGMT_OP_READ_CONTROLLER_CAP: assemble the controller capability
 * TLVs (security flags, key size limits, LE TX power range) into a
 * stack buffer and return them to userspace.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* buf must be large enough for the reply header plus all of the
	 * capability TLVs appended below.
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4311
/* Experimental feature UUIDs.  Each array stores the 128-bit UUID in
 * reversed (little-endian) byte order relative to the string form in
 * the comment above it, matching the on-the-wire mgmt encoding.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4355
/* Handler for MGMT_OP_READ_EXP_FEATURES_INFO.
 *
 * Reports the list of experimental features available either globally
 * (hdev == NULL, non-controller index) or for a specific controller,
 * along with each feature's current enable state in its flags field.
 * Also opts the requesting socket in to future EXP_FEATURE_CHANGED
 * events.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features: debug, LE simultaneous roles,
	 * LL privacy (RPA resolution), quality report, offload codecs,
	 * ISO socket and mesh -- one conditional block each below.
	 */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* The debug feature is only exposed on the non-controller index. */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* BIT(1) signals that changing this setting alters the
		 * supported settings (see exp_ll_privacy_feature_changed()).
		 */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* Each feature entry is 20 bytes: 16-byte UUID + 4-byte flags. */
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}
4460
/* Emit MGMT_EV_EXP_FEATURE_CHANGED for the LL privacy (RPA resolution)
 * feature to all sockets that opted in, except @skip.  BIT(1) in the
 * flags tells clients the supported settings changed as well.  As a
 * side effect, the DEVICE_PRIVACY connection flag is made available or
 * withdrawn to match the new state.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	/* NOTE(review): hdev->conn_flags is updated without explicit
	 * synchronization here -- confirm whether this needs to be atomic
	 * with respect to concurrent conn_flags readers.
	 */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);

}
4481
/* Notify opted-in mgmt sockets (all except @skip) that the experimental
 * feature identified by @uuid is now @enabled.
 */
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev = {};

	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
	memcpy(ev.uuid, uuid, 16);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, &ev,
				  sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS,
				  skip);
}
4495
/* Build one entry of the exp_features[] dispatch table below. */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4501
/* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	/* Respond with the zero UUID and no flags set. */
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Non-controller index: turn off the debug feature. */
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* Controller index: turn off LL privacy, allowed only while the
	 * controller is powered down.
	 */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4538
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Setter for the debug experimental feature (non-controller index only).
 * Expects a single boolean parameter octet; toggles the global bt debug
 * state and notifies other opted-in sockets on change.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here (checked above), so the event is broadcast
	 * on the non-controller index.
	 */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4585
/* Setter for the mesh experimental feature.  Requires a controller
 * index and a single boolean parameter octet; disabling the experiment
 * also clears any active HCI_MESH state.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, changed;
	int err;

	/* This feature is bound to a controller; reject the global index. */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Exactly one parameter octet is expected. */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* That octet must encode a boolean. */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	enable = !!cp->param[0];

	if (!enable) {
		/* Turning the experiment off also drops active mesh mode. */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	} else {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, enable, sk);

	return err;
}
4636
/* Setter for the LL privacy (RPA resolution) experimental feature.
 * Only allowed while the controller is powered down, since enabling
 * LL privacy changes the supported settings (signalled via BIT(1) in
 * the response flags) and clears the advertising setting.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4701
/* Setter for the quality report experimental feature.  Uses either the
 * driver's set_quality_report hook or the AOSP extension; runs under
 * the request sync lock since enabling the report issues HCI traffic.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	/* Reject when neither the driver hook nor AOSP support exists. */
	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Prefer the driver-provided hook over the AOSP fallback. */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only track the flag once the controller accepted it. */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4775
/* Setter for the offload-codecs experimental feature.  Requires a
 * controller index, a single boolean parameter octet, and driver
 * support for data path lookup (get_data_path_id).
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, changed;
	int err;

	/* A concrete controller index is required. */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Exactly one parameter octet is expected. */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* That octet must encode a boolean. */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	enable = !!cp->param[0];
	changed = (enable != hci_dev_test_flag(hdev,
					       HCI_OFFLOAD_CODECS_ENABLED));

	/* Codec offload needs the driver's data path lookup hook. */
	if (!hdev->get_data_path_id)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (changed) {
		if (enable)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    enable, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, enable, sk);

	return err;
}
4833
/* Setter for the LE simultaneous central/peripheral roles experimental
 * feature.  Requires a controller index, a single boolean parameter
 * octet, and controller support for simultaneous LE states.
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4891
#ifdef CONFIG_BT_LE
/* Setter for the ISO socket experimental feature (non-controller index
 * only).  Registers or unregisters the ISO socket protocol globally.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = cp->param[0] ? true : false;
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	/* Only report a change when init/exit actually succeeded. */
	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here (checked above): global broadcast. */
	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif
4942
/* Dispatch table mapping an experimental feature UUID to its setter;
 * searched linearly by set_exp_feature() and terminated by the NULL
 * sentinel entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4964
/* Handler for MGMT_OP_SET_EXP_FEATURE: look the requested UUID up in
 * exp_features[] and hand the command to the matching setter.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	const struct mgmt_exp_feature *feat;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The table is terminated by an entry with a NULL uuid. */
	for (feat = exp_features; feat->uuid; feat++) {
		if (!memcmp(cp->uuid, feat->uuid, 16))
			return feat->set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4982
/* Return the set of connection flags supported for @params, starting
 * from the controller-wide hdev->conn_flags and masking out
 * HCI_CONN_FLAG_REMOTE_WAKEUP when it cannot be honoured for this
 * device.
 */
static u32 get_params_flags(struct hci_dev *hdev,
			    struct hci_conn_params *params)
{
	u32 flags = hdev->conn_flags;

	/* Devices using RPAs can only be programmed in the acceptlist if
	 * LL Privacy has been enable otherwise they cannot mark
	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
	 */
	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;

	return flags;
}
4998
/* Handler for MGMT_OP_GET_DEVICE_FLAGS.
 *
 * Looks the address up either in the BR/EDR accept list or in the LE
 * connection parameters and reports its supported and current flags.
 * Responds with MGMT_STATUS_INVALID_PARAMS when the device is unknown.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	/* No trailing "\n": bt_dev_dbg() appends one itself, matching
	 * every other bt_dev_dbg() call site in this file.
	 */
	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		/* LE entries may support fewer flags than the controller
		 * default (see get_params_flags()).
		 */
		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}
5050
/* Broadcast MGMT_EV_DEVICE_FLAGS_CHANGED to every mgmt socket except
 * the originating one (@sk).
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev = {
		.supported_flags = cpu_to_le32(supported_flags),
		.current_flags = cpu_to_le32(current_flags),
	};

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
5064
/* Handler for MGMT_OP_SET_DEVICE_FLAGS.
 *
 * Validates the requested flags against the supported set, stores them
 * on the matching BR/EDR accept list entry or LE connection parameters,
 * and on success broadcasts DEVICE_FLAGS_CHANGED to other sockets.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): hdev->conn_flags is read before hci_dev_lock()
	 * is taken -- confirm it cannot change concurrently, or take the
	 * lock earlier.
	 */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* LE entries may support fewer flags than the controller default,
	 * so re-validate against the per-params set.
	 */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5141
/* Broadcast MGMT_EV_ADV_MONITOR_ADDED for @handle to every mgmt socket
 * except the originating one (@sk).
 */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
5151
/* Broadcast MGMT_EV_ADV_MONITOR_REMOVED for @handle.
 *
 * When the removal was triggered by a pending REMOVE_ADV_MONITOR
 * command targeting a specific (non-zero) handle, the issuing socket is
 * skipped: it learns the outcome from the command response instead.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* Handle 0 means "remove all"; only skip for a targeted
		 * removal.
		 */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5171
/* Handler for MGMT_OP_READ_ADV_MONITOR_FEATURES.
 *
 * Reports the supported/enabled monitor feature masks, the monitor
 * limits and the handles of all currently registered monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* NOTE(review): assumes at most HCI_MAX_ADV_MONITOR_NUM_HANDLES
	 * monitors are ever registered (presumably enforced on the add
	 * path) -- otherwise handles[] would overflow; confirm.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5220
/* hci_cmd_sync completion callback for ADD_ADV_PATTERNS_MONITOR[_RSSI].
 *
 * On success, announces the new monitor to other sockets, bumps the
 * monitor count, marks the monitor registered and refreshes passive
 * scanning.  Always completes and removes the pending mgmt command.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5248
mgmt_add_adv_patterns_monitor_sync(struct hci_dev * hdev,void * data)5249 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5250 {
5251 struct mgmt_pending_cmd *cmd = data;
5252 struct adv_monitor *monitor = cmd->user_data;
5253
5254 return hci_add_adv_monitor(hdev, monitor);
5255 }
5256
/* Common tail for the ADD_ADV_PATTERNS_MONITOR[_RSSI] handlers.
 *
 * Takes ownership of monitor @m: on any failure path it is freed via
 * hci_free_adv_monitor() and a status response is sent; on success a
 * pending command is queued and completion is reported asynchronously
 * by mgmt_add_adv_patterns_monitor_complete().
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	/* Caller already detected a parse/validation error. */
	if (status)
		goto unlock;

	/* Serialize against other monitor/LE state changing commands. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5304
parse_adv_monitor_rssi(struct adv_monitor * m,struct mgmt_adv_rssi_thresholds * rssi)5305 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5306 struct mgmt_adv_rssi_thresholds *rssi)
5307 {
5308 if (rssi) {
5309 m->rssi.low_threshold = rssi->low_threshold;
5310 m->rssi.low_threshold_timeout =
5311 __le16_to_cpu(rssi->low_threshold_timeout);
5312 m->rssi.high_threshold = rssi->high_threshold;
5313 m->rssi.high_threshold_timeout =
5314 __le16_to_cpu(rssi->high_threshold_timeout);
5315 m->rssi.sampling_period = rssi->sampling_period;
5316 } else {
5317 /* Default values. These numbers are the least constricting
5318 * parameters for MSFT API to work, so it behaves as if there
5319 * are no rssi parameter to consider. May need to be changed
5320 * if other API are to be supported.
5321 */
5322 m->rssi.low_threshold = -127;
5323 m->rssi.low_threshold_timeout = 60;
5324 m->rssi.high_threshold = -127;
5325 m->rssi.high_threshold_timeout = 0;
5326 m->rssi.sampling_period = 0;
5327 }
5328 }
5329
/* Validate @pattern_count patterns from the mgmt request and append a
 * copy of each to @m->patterns.  On error, patterns already queued stay
 * on the list; the caller releases them through hci_free_adv_monitor().
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	int i;

	for (i = 0; i < pattern_count; i++) {
		struct mgmt_adv_pattern *src = &patterns[i];
		struct adv_pattern *p;

		/* The pattern must lie entirely inside extended AD data */
		if (src->offset >= HCI_MAX_EXT_AD_LENGTH ||
		    src->length > HCI_MAX_EXT_AD_LENGTH ||
		    (src->offset + src->length) > HCI_MAX_EXT_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = src->ad_type;
		p->offset = src->offset;
		p->length = src->length;
		memcpy(p->value, src->value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
5360
/* Handle MGMT_OP_ADD_ADV_PATTERNS_MONITOR: validate the variable-length
 * request, build an adv_monitor with default RSSI parameters and hand it
 * to __add_adv_patterns_monitor(), which owns @m from here on (it frees
 * it on failure, including when @status is already set).
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* Must carry at least one pattern beyond the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Length must match the advertised pattern count exactly */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* No RSSI block in this command variant; use defaults */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5397
/* Handle MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI: same as
 * add_adv_patterns_monitor() but the request also carries RSSI
 * thresholds, which are copied into the monitor before the patterns
 * are parsed.  __add_adv_patterns_monitor() takes ownership of @m.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* Must carry at least one pattern beyond the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Length must match the advertised pattern count exactly */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5434
/* Completion callback for MGMT_OP_REMOVE_ADV_MONITOR: echo the handle
 * from the original request back to userspace and refresh passive
 * scanning if the removal succeeded.
 * @status: result of mgmt_remove_adv_monitor_sync(); 0 on success.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Already little-endian in the request; copied through as-is */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5457
mgmt_remove_adv_monitor_sync(struct hci_dev * hdev,void * data)5458 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5459 {
5460 struct mgmt_pending_cmd *cmd = data;
5461 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5462 u16 handle = __le16_to_cpu(cp->monitor_handle);
5463
5464 if (!handle)
5465 return hci_remove_all_adv_monitor(hdev);
5466
5467 return hci_remove_single_adv_monitor(hdev, handle);
5468 }
5469
/* Handle MGMT_OP_REMOVE_ADV_MONITOR: queue removal of the requested
 * monitor (or all monitors) on the cmd_sync work.  The reply is sent
 * from mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Only one monitor/LE state operation may be in flight at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5515
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_DATA: parse the HCI
 * reply skb stashed in cmd->skb and send the OOB hash/randomizer values
 * back to userspace.  Any failure (err, missing/errored skb, or HCI
 * status byte) turns into a mgmt command status instead.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* err was fine; triage the skb itself (absent, ERR_PTR, or the
	 * HCI status byte carried in its first byte).
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: P-192 values only */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields from the reply */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5582
read_local_oob_data_sync(struct hci_dev * hdev,void * data)5583 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5584 {
5585 struct mgmt_pending_cmd *cmd = data;
5586
5587 if (bredr_sc_enabled(hdev))
5588 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5589 else
5590 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5591
5592 if (IS_ERR(cmd->skb))
5593 return PTR_ERR(cmd->skb);
5594 else
5595 return 0;
5596 }
5597
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: requires a powered, SSP-capable
 * controller.  The actual HCI exchange is queued on the cmd_sync work;
 * the reply is sent from read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5639
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store remote OOB pairing data.
 * Two request sizes are accepted: the legacy form (P-192 hash/rand only,
 * BR/EDR addresses only) and the extended form (P-192 + P-256).  In the
 * extended form an all-zero key pair means "no data for that curve".
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy request: P-192 values only, BR/EDR only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended request: P-192 and P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5747
/* Handle MGMT_OP_REMOVE_REMOTE_OOB_DATA: drop stored remote OOB data
 * for one BR/EDR address, or for all addresses when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* BDADDR_ANY clears every stored entry */
		hci_remote_oob_data_clear(hdev);
	} else if (hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type) < 0) {
		/* Entry not found for this address/type */
		status = MGMT_STATUS_INVALID_PARAMS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
5784
/* Complete whichever Start Discovery variant is pending (regular,
 * service or limited) with the given HCI status.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	static const u16 ops[] = {
		MGMT_OP_START_DISCOVERY,
		MGMT_OP_START_SERVICE_DISCOVERY,
		MGMT_OP_START_LIMITED_DISCOVERY,
	};
	struct mgmt_pending_cmd *cmd = NULL;
	int i;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* At most one of the three variants can be pending */
	for (i = 0; i < ARRAY_SIZE(ops) && !cmd; i++)
		cmd = pending_find(ops[i], hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
5807
discovery_type_is_valid(struct hci_dev * hdev,uint8_t type,uint8_t * mgmt_status)5808 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5809 uint8_t *mgmt_status)
5810 {
5811 switch (type) {
5812 case DISCOV_TYPE_LE:
5813 *mgmt_status = mgmt_le_support(hdev);
5814 if (*mgmt_status)
5815 return false;
5816 break;
5817 case DISCOV_TYPE_INTERLEAVED:
5818 *mgmt_status = mgmt_le_support(hdev);
5819 if (*mgmt_status)
5820 return false;
5821 fallthrough;
5822 case DISCOV_TYPE_BREDR:
5823 *mgmt_status = mgmt_bredr_support(hdev);
5824 if (*mgmt_status)
5825 return false;
5826 break;
5827 default:
5828 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5829 return false;
5830 }
5831
5832 return true;
5833 }
5834
/* cmd_sync completion for all Start Discovery variants: reply to the
 * pending command and advance the discovery state machine.  Bail out if
 * the command is no longer the pending one for any of the variants.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply carries the single type byte from the request */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5853
/* Runs on the cmd_sync queue: kick off the actual discovery procedure. */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5858
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which): validate state
 * and discovery type, then queue the discovery start on the cmd_sync
 * work.  The reply is sent from start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry is on */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5929
/* Handle MGMT_OP_START_DISCOVERY (regular discovery variant). */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5936
/* Handle MGMT_OP_START_LIMITED_DISCOVERY (limited discovery variant). */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5944
/* Handle MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery but with
 * result filtering by RSSI and an optional variable-length UUID list
 * (16 bytes per UUID) appended to the request.  The UUID list is copied
 * into hdev->discovery and the start is queued on the cmd_sync work.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound such that sizeof(*cp) + uuid_count * 16 fits in u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry is on */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Length must match the advertised UUID count exactly */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6056
/* Complete a pending MGMT_OP_STOP_DISCOVERY command, if any, with the
 * given HCI status.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6073
/* cmd_sync completion for MGMT_OP_STOP_DISCOVERY: reply to the pending
 * command and, on success, mark discovery as stopped.  Bail out if the
 * command is no longer the pending one.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply carries the single type byte from the request */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6090
/* Runs on the cmd_sync queue: stop the active discovery procedure. */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6095
/* Handle MGMT_OP_STOP_DISCOVERY: validate that discovery is active and
 * that the requested type matches the running one, then queue the stop
 * on the cmd_sync work.  The reply is sent from stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The stop request must name the same type that was started */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6140
/* Handle MGMT_OP_CONFIRM_NAME: update the inquiry-cache name state for
 * an entry found during discovery.  A known name removes the entry from
 * the resolve list; an unknown one re-queues it for name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto done;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto done;
	}

	if (!cp->name_known) {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	} else {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

done:
	hci_dev_unlock(hdev);
	return err;
}
6182
/* Handle MGMT_OP_BLOCK_DEVICE: add the address to the reject list and
 * broadcast a Device Blocked event on success.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0) {
		status = MGMT_STATUS_FAILED;
	} else {
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6218
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove the address from the reject
 * list and broadcast a Device Unblocked event on success.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0) {
		/* Address was not on the reject list */
		status = MGMT_STATUS_INVALID_PARAMS;
	} else {
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6254
/* Runs on the cmd_sync queue: refresh the EIR data after a Device ID
 * change.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6259
/* Handle MGMT_OP_SET_DEVICE_ID: store the Device ID fields on the
 * controller and schedule an EIR refresh.  Only sources 0x0000-0x0002
 * are accepted.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	__u16 source = __le16_to_cpu(cp->source);
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Push the new values out via an EIR update */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
6291
/* Log the outcome of re-enabling an advertising instance. */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6299
/* cmd_sync completion for MGMT_OP_SET_ADVERTISING: sync the
 * HCI_ADVERTISING flag with the controller state, answer all pending
 * Set Advertising commands, emit New Settings and — when advertising
 * was just turned off — re-enable any configured advertising instances.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command with status */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's LE advertising state into the flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance; fall back to the first configured one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6347
/* Runs on the cmd_sync queue for MGMT_OP_SET_ADVERTISING: apply the
 * requested mode.  cp->val: 0x00 = off, 0x01 = on, 0x02 = on and
 * connectable (sets HCI_ADVERTISING_CONNECTABLE).
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6381
/* Handler for MGMT_OP_SET_ADVERTISING: enable (0x01), enable connectable
 * (0x02) or disable (0x00) software advertising. Either toggles the flags
 * directly and replies, or queues set_adv_sync() to talk to the controller.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* Only 0x00 (off), 0x01 (on) and 0x02 (connectable) are valid */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising is temporarily paused; reject instead of racing
	 * with whatever paused it.
	 */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings when a flag actually flipped */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against other in-flight Set Advertising / Set LE ops */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	/* Queueing failed: discard the pending command entry */
	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6466
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: store a static random LE
 * address for a powered-off, LE-capable controller. BDADDR_ANY is
 * accepted and disables the static address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* The address may only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* Anything other than BDADDR_ANY must be a well-formed static
	 * random address: not BDADDR_NONE, and with the two most
	 * significant bits set.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY) &&
	    (!bacmp(&cp->bdaddr, BDADDR_NONE) ||
	     (cp->bdaddr.b[5] & 0xc0) != 0xc0))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err >= 0)
		err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
	return err;
}
6510
/* Handler for MGMT_OP_SET_SCAN_PARAMS: store LE scan interval/window
 * (both in the 0x0004-0x4000 range, window <= interval) and restart a
 * running background scan so the new values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);
	window = __le16_to_cpu(cp->window);

	/* Both values share the same valid range and the window must
	 * not exceed the interval.
	 */
	if (interval < 0x0004 || interval > 0x4000 ||
	    window < 0x0004 || window > 0x4000 ||
	    window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6559
fast_connectable_complete(struct hci_dev * hdev,void * data,int err)6560 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6561 {
6562 struct mgmt_pending_cmd *cmd = data;
6563
6564 bt_dev_dbg(hdev, "err %d", err);
6565
6566 if (err) {
6567 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6568 mgmt_status(err));
6569 } else {
6570 struct mgmt_mode *cp = cmd->param;
6571
6572 if (cp->val)
6573 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6574 else
6575 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6576
6577 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6578 new_settings(hdev, cmd->sk);
6579 }
6580
6581 mgmt_pending_free(cmd);
6582 }
6583
write_fast_connectable_sync(struct hci_dev * hdev,void * data)6584 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6585 {
6586 struct mgmt_pending_cmd *cmd = data;
6587 struct mgmt_mode *cp = cmd->param;
6588
6589 return hci_write_fast_connectable_sync(hdev, cp->val);
6590 }
6591
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: toggle BR/EDR fast
 * connectable mode. Requires BR/EDR enabled and controller >= 1.2.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only plain on/off is valid for this command */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Already in the requested state: just confirm, no HCI traffic */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: flip the flag now; it is applied on power on */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	/* Allocation or queueing failed: report failure and clean up */
	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6647
set_bredr_complete(struct hci_dev * hdev,void * data,int err)6648 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6649 {
6650 struct mgmt_pending_cmd *cmd = data;
6651
6652 bt_dev_dbg(hdev, "err %d", err);
6653
6654 if (err) {
6655 u8 mgmt_err = mgmt_status(err);
6656
6657 /* We need to restore the flag if related HCI commands
6658 * failed.
6659 */
6660 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6661
6662 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6663 } else {
6664 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6665 new_settings(hdev, cmd->sk);
6666 }
6667
6668 mgmt_pending_free(cmd);
6669 }
6670
set_bredr_sync(struct hci_dev * hdev,void * data)6671 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6672 {
6673 int status;
6674
6675 status = hci_write_fast_connectable_sync(hdev, false);
6676
6677 if (!status)
6678 status = hci_update_scan_sync(hdev);
6679
6680 /* Since only the advertising data flags will change, there
6681 * is no need to update the scan response data.
6682 */
6683 if (!status)
6684 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6685
6686 return status;
6687 }
6688
/* Handler for MGMT_OP_SET_BREDR: enable or disable BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller. Disabling while powered on, or
 * re-enabling with a static address or Secure Connections active, is
 * rejected.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only meaningful on dual-mode controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Already in the requested state: just confirm */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears every BR/EDR-only setting */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6788
set_secure_conn_complete(struct hci_dev * hdev,void * data,int err)6789 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6790 {
6791 struct mgmt_pending_cmd *cmd = data;
6792 struct mgmt_mode *cp;
6793
6794 bt_dev_dbg(hdev, "err %d", err);
6795
6796 if (err) {
6797 u8 mgmt_err = mgmt_status(err);
6798
6799 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6800 goto done;
6801 }
6802
6803 cp = cmd->param;
6804
6805 switch (cp->val) {
6806 case 0x00:
6807 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6808 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6809 break;
6810 case 0x01:
6811 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6812 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6813 break;
6814 case 0x02:
6815 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6816 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6817 break;
6818 }
6819
6820 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6821 new_settings(hdev, cmd->sk);
6822
6823 done:
6824 mgmt_pending_free(cmd);
6825 }
6826
set_secure_conn_sync(struct hci_dev * hdev,void * data)6827 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6828 {
6829 struct mgmt_pending_cmd *cmd = data;
6830 struct mgmt_mode *cp = cmd->param;
6831 u8 val = !!cp->val;
6832
6833 /* Force write of val */
6834 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6835
6836 return hci_write_sc_support_sync(hdev, val);
6837 }
6838
/* Handler for MGMT_OP_SET_SECURE_CONN: configure Secure Connections
 * mode (0x00 off, 0x01 on, 0x02 SC-only). Without a powered, SC-capable
 * BR/EDR link only the flags are toggled; otherwise the setting is
 * written to the controller via set_secure_conn_sync().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC requires controller support or at least LE enabled */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC-capable BR/EDR controller, SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No controller interaction needed: just record the flags */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* Requested mode already active: just confirm */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	/* Allocation or queueing failed: report failure and clean up */
	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6919
/* Handler for MGMT_OP_SET_DEBUG_KEYS: 0x00 discards debug keys,
 * 0x01 keeps them, 0x02 additionally generates/uses debug keys
 * (SSP debug mode).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Any non-zero value means debug keys are retained */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 turns on active use of debug keys */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Tell the controller to toggle SSP debug mode when the use
	 * flag actually changed and SSP is active.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6966
/* Handler for MGMT_OP_SET_PRIVACY: configure LE privacy (0x00 off,
 * 0x01 on, 0x02 limited privacy) and store/clear the local IRK. Only
 * allowed while powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Privacy may only be reconfigured while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		/* Adopt the supplied IRK and force a fresh RPA */
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the stored IRK when privacy is disabled */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7023
irk_is_valid(struct mgmt_irk_info * irk)7024 static bool irk_is_valid(struct mgmt_irk_info *irk)
7025 {
7026 switch (irk->addr.type) {
7027 case BDADDR_LE_PUBLIC:
7028 return true;
7029
7030 case BDADDR_LE_RANDOM:
7031 /* Two most significant bits shall be set */
7032 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7033 return false;
7034 return true;
7035 }
7036
7037 return false;
7038 }
7039
/* Handler for MGMT_OP_LOAD_IRKS: replace the stored Identity Resolving
 * Keys with the supplied list. Blocked keys are skipped with a warning;
 * any invalid entry rejects the whole command.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps struct_size() below U16_MAX */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before touching the stored key list */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are skipped, not loaded */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7110
ltk_is_valid(struct mgmt_ltk_info * key)7111 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7112 {
7113 if (key->initiator != 0x00 && key->initiator != 0x01)
7114 return false;
7115
7116 switch (key->addr.type) {
7117 case BDADDR_LE_PUBLIC:
7118 return true;
7119
7120 case BDADDR_LE_RANDOM:
7121 /* Two most significant bits shall be set */
7122 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7123 return false;
7124 return true;
7125 }
7126
7127 return false;
7128 }
7129
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: replace the stored LTKs with
 * the supplied list. Blocked, invalid and debug keys are skipped
 * individually rather than failing the whole command.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps struct_size() below U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are skipped, not loaded */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (!ltk_is_valid(key)) {
			bt_dev_warn(hdev, "Invalid LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type to SMP key type + authentication */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys are deliberately never loaded: fall
			 * through to the default case which skips the
			 * entry (and any unknown key type as well).
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7222
get_conn_info_complete(struct hci_dev * hdev,void * data,int err)7223 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7224 {
7225 struct mgmt_pending_cmd *cmd = data;
7226 struct hci_conn *conn = cmd->user_data;
7227 struct mgmt_cp_get_conn_info *cp = cmd->param;
7228 struct mgmt_rp_get_conn_info rp;
7229 u8 status;
7230
7231 bt_dev_dbg(hdev, "err %d", err);
7232
7233 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7234
7235 status = mgmt_status(err);
7236 if (status == MGMT_STATUS_SUCCESS) {
7237 rp.rssi = conn->rssi;
7238 rp.tx_power = conn->tx_power;
7239 rp.max_tx_power = conn->max_tx_power;
7240 } else {
7241 rp.rssi = HCI_RSSI_INVALID;
7242 rp.tx_power = HCI_TX_POWER_INVALID;
7243 rp.max_tx_power = HCI_TX_POWER_INVALID;
7244 }
7245
7246 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7247 &rp, sizeof(rp));
7248
7249 mgmt_pending_free(cmd);
7250 }
7251
get_conn_info_sync(struct hci_dev * hdev,void * data)7252 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7253 {
7254 struct mgmt_pending_cmd *cmd = data;
7255 struct mgmt_cp_get_conn_info *cp = cmd->param;
7256 struct hci_conn *conn;
7257 int err;
7258 __le16 handle;
7259
7260 /* Make sure we are still connected */
7261 if (cp->addr.type == BDADDR_BREDR)
7262 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7263 &cp->addr.bdaddr);
7264 else
7265 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7266
7267 if (!conn || conn->state != BT_CONNECTED)
7268 return MGMT_STATUS_NOT_CONNECTED;
7269
7270 cmd->user_data = conn;
7271 handle = cpu_to_le16(conn->handle);
7272
7273 /* Refresh RSSI each time */
7274 err = hci_read_rssi_sync(hdev, handle);
7275
7276 /* For LE links TX power does not change thus we don't need to
7277 * query for it once value is known.
7278 */
7279 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7280 conn->tx_power == HCI_TX_POWER_INVALID))
7281 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7282
7283 /* Max TX power needs to be read only once per connection */
7284 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7285 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7286
7287 return err;
7288 }
7289
/* Handler for MGMT_OP_GET_CONN_INFO: return RSSI and TX power for a
 * connection, refreshing the cached values from the controller when
 * they are older than a randomized age threshold.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the queried address in every reply */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BR/EDR addresses use the ACL link table, everything else LE */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		/* Allocation or queueing failed: report failure now */
		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7380
get_clock_info_complete(struct hci_dev * hdev,void * data,int err)7381 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7382 {
7383 struct mgmt_pending_cmd *cmd = data;
7384 struct mgmt_cp_get_clock_info *cp = cmd->param;
7385 struct mgmt_rp_get_clock_info rp;
7386 struct hci_conn *conn = cmd->user_data;
7387 u8 status = mgmt_status(err);
7388
7389 bt_dev_dbg(hdev, "err %d", err);
7390
7391 memset(&rp, 0, sizeof(rp));
7392 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7393 rp.addr.type = cp->addr.type;
7394
7395 if (err)
7396 goto complete;
7397
7398 rp.local_clock = cpu_to_le32(hdev->clock);
7399
7400 if (conn) {
7401 rp.piconet_clock = cpu_to_le32(conn->clock);
7402 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7403 }
7404
7405 complete:
7406 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7407 sizeof(rp));
7408
7409 mgmt_pending_free(cmd);
7410 }
7411
/* hci_cmd_sync work for Get Clock Information: read the local clock and
 * then the piconet clock of the addressed ACL connection.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* First read with a zeroed hci_cp (handle 0, which = 0x00):
	 * this fetches the local clock. The result of this read is
	 * intentionally not checked.
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7433
/* Handle the Get Clock Info mgmt command: report the local clock and,
 * for a connected BR/EDR peer, the piconet clock.  The HCI traffic runs
 * asynchronously via get_clock_info_sync() with the result delivered by
 * get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the reply address so error completions echo it back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR addresses */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address asks for a specific peer's piconet clock,
	 * which requires an established connection to that peer.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	/* Queueing failed: complete with failure and drop the pending cmd */
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7497
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)7498 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7499 {
7500 struct hci_conn *conn;
7501
7502 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7503 if (!conn)
7504 return false;
7505
7506 if (conn->dst_type != type)
7507 return false;
7508
7509 if (conn->state != BT_CONNECTED)
7510 return false;
7511
7512 return true;
7513 }
7514
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Creates the entry if it does not exist yet */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Drop from whichever pending list the entry is currently on;
	 * it is re-added below according to the new policy.
	 */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-flight explicit connect takes precedence over
		 * report-only handling.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already linked */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7559
/* Send the Device Added event to mgmt listeners */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev = {
		.addr.type = type,
		.action = action,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
7571
/* hci_sync callback for Add Device: the new entry only requires the
 * passive scan parameters to be re-evaluated.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7576
/* Handle the Add Device mgmt command: put a BR/EDR peer on the accept
 * list, or create/update LE connection parameters with the requested
 * auto-connect policy, then emit Device Added and Device Flags Changed.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Valid actions are 0x00-0x02 (see the LE mapping below) */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy:
	 * 0x02 -> always connect, 0x01 -> direct connect,
	 * 0x00 -> report only.
	 */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Pick up the flags of the (possibly just created) entry
		 * for the Device Flags Changed event below.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7678
/* Send the Device Removed event to mgmt listeners */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev = {
		.addr.type = type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
7689
/* hci_sync callback for Remove Device: re-evaluate passive scanning now
 * that the accept/report lists have changed.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7694
/* Handle the Remove Device mgmt command: delete a single accept-list
 * entry or LE connection parameter set, or - when given BDADDR_ANY -
 * flush all of them.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* DISABLED and EXPLICIT entries are not removable via this
		 * command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a connection attempt in flight;
			 * downgrade them to explicit-connect only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7818
/* Handle the Load Connection Parameters mgmt command: replace the
 * disabled LE connection parameter entries with the list supplied by
 * userspace.  Individual malformed entries are skipped so that one bad
 * record does not reject the whole load.
 *
 * Fixes mis-encoded "&param" tokens (mojibake "¶m") in the debug and
 * hci_conn_params_add() calls, which made the function un-compilable.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared count */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Start from a clean slate for disabled entries */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE identity address types are acceptable */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7903
/* Handle the Set External Configuration mgmt command: toggle whether
 * the controller's configuration is provided externally, and move the
 * index between configured and unconfigured state as needed.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only allowed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the UNCONFIGURED flag no longer matches the actual
	 * configuration state, the index has to be re-announced with
	 * the flag flipped accordingly.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7959
/* Handle the Set Public Address mgmt command: record the address to be
 * programmed through the driver's set_bdaddr hook and, once the device
 * becomes fully configured, kick off configuration/power-on.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only allowed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Requires driver support for programming the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Setting the address may have been the last missing piece of
	 * configuration: re-announce the index as configured.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8011
/* Completion handler for Read Local OOB Extended Data: convert the HCI
 * Read Local OOB (Extended) Data reply into EIR-encoded hash/randomizer
 * fields, complete the pending mgmt command and broadcast the OOB data
 * updated event.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if the command is no longer pending */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* No error passed in: derive the status from the skb instead */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status was already produced by
		 * mgmt_status() above, so this second conversion looks
		 * redundant - confirm it is harmless.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy (P-192 only) OOB data reply */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-dev field plus two 18-byte EIR fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended (P-192 + P-256) OOB data reply */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only mode: suppress P-192 values */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8134
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)8135 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8136 struct mgmt_cp_read_local_oob_ext_data *cp)
8137 {
8138 struct mgmt_pending_cmd *cmd;
8139 int err;
8140
8141 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8142 cp, sizeof(*cp));
8143 if (!cmd)
8144 return -ENOMEM;
8145
8146 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8147 read_local_oob_ext_data_complete);
8148
8149 if (err < 0) {
8150 mgmt_pending_remove(cmd);
8151 return err;
8152 }
8153
8154 return 0;
8155 }
8156
/* Handle the Read Local OOB Extended Data mgmt command: return
 * EIR-formatted out-of-band pairing data for BR/EDR or LE.  The BR/EDR
 * SSP case requires an HCI exchange and completes asynchronously via
 * read_local_ssp_oob_req(); everything else is answered directly.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Work out the worst-case EIR length for the requested transport */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Reset; the actual EIR data is appended incrementally below */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Needs an HCI round-trip; on success the reply
			 * comes from the completion handler instead.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Choose static vs public address; the 7th byte encodes
		 * the address type (0x01 static/random, 0x00 public).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* LE role byte: 0x02 when advertising, 0x01 otherwise -
		 * TODO confirm mapping against the mgmt-api spec.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8317
get_supported_adv_flags(struct hci_dev * hdev)8318 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8319 {
8320 u32 flags = 0;
8321
8322 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8323 flags |= MGMT_ADV_FLAG_DISCOV;
8324 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8325 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8326 flags |= MGMT_ADV_FLAG_APPEARANCE;
8327 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8328 flags |= MGMT_ADV_PARAM_DURATION;
8329 flags |= MGMT_ADV_PARAM_TIMEOUT;
8330 flags |= MGMT_ADV_PARAM_INTERVALS;
8331 flags |= MGMT_ADV_PARAM_TX_POWER;
8332 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8333
8334 /* In extended adv TX_POWER returned from Set Adv Param
8335 * will be always valid.
8336 */
8337 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8338 flags |= MGMT_ADV_FLAG_TX_POWER;
8339
8340 if (ext_adv_capable(hdev)) {
8341 flags |= MGMT_ADV_FLAG_SEC_1M;
8342 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8343 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8344
8345 if (le_2m_capable(hdev))
8346 flags |= MGMT_ADV_FLAG_SEC_2M;
8347
8348 if (le_coded_capable(hdev))
8349 flags |= MGMT_ADV_FLAG_SEC_CODED;
8350 }
8351
8352 return flags;
8353 }
8354
/* Handle the Read Advertising Features mgmt command: report supported
 * flags, data length limits and the currently configured instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per instance for the instance list */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		/* NOTE(review): the check below compares against
		 * adv_instance_cnt, not le_num_of_adv_sets as the comment
		 * above says - confirm which limit is intended.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink both count and reply */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8409
/* Number of bytes eir_append_local_name() would add for this device's
 * (possibly shortened) local name, including the length and type bytes.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}
8416
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)8417 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8418 bool is_adv_data)
8419 {
8420 u8 max_len = max_adv_len(hdev);
8421
8422 if (is_adv_data) {
8423 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8424 MGMT_ADV_FLAG_LIMITED_DISCOV |
8425 MGMT_ADV_FLAG_MANAGED_FLAGS))
8426 max_len -= 3;
8427
8428 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8429 max_len -= 3;
8430 } else {
8431 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8432 max_len -= calculate_name_len(hdev);
8433
8434 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8435 max_len -= 4;
8436 }
8437
8438 return max_len;
8439 }
8440
flags_managed(u32 adv_flags)8441 static bool flags_managed(u32 adv_flags)
8442 {
8443 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8444 MGMT_ADV_FLAG_LIMITED_DISCOV |
8445 MGMT_ADV_FLAG_MANAGED_FLAGS);
8446 }
8447
tx_power_managed(u32 adv_flags)8448 static bool tx_power_managed(u32 adv_flags)
8449 {
8450 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8451 }
8452
name_managed(u32 adv_flags)8453 static bool name_managed(u32 adv_flags)
8454 {
8455 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8456 }
8457
appearance_managed(u32 adv_flags)8458 static bool appearance_managed(u32 adv_flags)
8459 {
8460 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8461 }
8462
/* Validate user-supplied advertising/scan-response TLV data: it must
 * fit within the derived maximum length, be well-formed
 * length/type/value triplets, and must not contain fields the kernel
 * manages itself for this instance.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		/* NOTE(review): data[i + 1] is read before the
		 * "i + cur_len >= len" bound check below; for i == len - 1
		 * with a non-zero length byte this reads one byte past the
		 * declared data - confirm the surrounding buffer makes
		 * this benign.
		 */
		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
			return false;
	}

	return true;
}
8507
requested_adv_flags_are_valid(struct hci_dev * hdev,u32 adv_flags)8508 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8509 {
8510 u32 supported_flags, phy_flags;
8511
8512 /* The current implementation only supports a subset of the specified
8513 * flags. Also need to check mutual exclusiveness of sec flags.
8514 */
8515 supported_flags = get_supported_adv_flags(hdev);
8516 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8517 if (adv_flags & ~supported_flags ||
8518 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8519 return false;
8520
8521 return true;
8522 }
8523
/* Advertising changes are rejected while a Set LE command is pending */
static bool adv_busy(struct hci_dev *hdev)
{
	return pending_find(MGMT_OP_SET_LE, hdev);
}
8528
/* Finish off an Add Advertising request: on success clear the pending
 * flag of the freshly programmed instances; on failure remove every
 * pending instance and notify mgmt listeners.
 *
 * @instance: instance the completed request was for (kept for callers;
 * the cleanup below walks every pending instance regardless).
 *
 * Fix: the loop-local counter used to be named "instance" as well,
 * shadowing the parameter (-Wshadow); renamed to rem_inst.
 */
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 rem_inst;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		rem_inst = adv->instance;

		/* Stop the expiry timer if the failed instance is active */
		if (hdev->cur_adv_instance == rem_inst)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, rem_inst);
		mgmt_advertising_removed(sk, hdev, rem_inst);
	}

	hci_dev_unlock(hdev);
}
8560
add_advertising_complete(struct hci_dev * hdev,void * data,int err)8561 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8562 {
8563 struct mgmt_pending_cmd *cmd = data;
8564 struct mgmt_cp_add_advertising *cp = cmd->param;
8565 struct mgmt_rp_add_advertising rp;
8566
8567 memset(&rp, 0, sizeof(rp));
8568
8569 rp.instance = cp->instance;
8570
8571 if (err)
8572 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8573 mgmt_status(err));
8574 else
8575 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8576 mgmt_status(err), &rp, sizeof(rp));
8577
8578 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8579
8580 mgmt_pending_free(cmd);
8581 }
8582
add_advertising_sync(struct hci_dev * hdev,void * data)8583 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8584 {
8585 struct mgmt_pending_cmd *cmd = data;
8586 struct mgmt_cp_add_advertising *cp = cmd->param;
8587
8588 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8589 }
8590
/* MGMT_OP_ADD_ADVERTISING handler: register (or replace) advertising
 * instance cp->instance with the supplied flags, advertising data and
 * scan response data, then schedule it if appropriate.
 *
 * Replies immediately when no HCI traffic is required (not powered,
 * HCI_ADVERTISING set, or nothing to schedule); otherwise the reply is
 * deferred to add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1-based and bounded by the controller. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length tail must match the two declared lengths. */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout only makes sense on a powered adapter. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Adv data and scan response are concatenated in cp->data. */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* NOTE(review): this writes to the caller-supplied buffer (cp ==
	 * data) after mgmt_pending_new() has been given @data; if that
	 * call copies the parameters, cmd->param may still hold the
	 * original instance — confirm against mgmt_pending_new().
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8725
/* Completion callback for MGMT_OP_ADD_EXT_ADV_PARAMS.
 *
 * Sends the deferred reply: on success it carries the selected TX power
 * and the data-length limits for the instance; on failure the instance
 * is torn down again and only a status is returned.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance may already be gone; nothing to report then. */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	/* cmd is the callback's data pointer and is never NULL here, so
	 * this guard is effectively always taken.
	 */
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8776
add_ext_adv_params_sync(struct hci_dev * hdev,void * data)8777 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8778 {
8779 struct mgmt_pending_cmd *cmd = data;
8780 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8781
8782 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8783 }
8784
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler: first half of the two-step
 * extended advertising registration. Creates an advertising instance
 * with the requested parameters but no data; the data follows via
 * MGMT_OP_ADD_EXT_ADV_DATA.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instance numbers are 1-based and bounded by the controller. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		/* Reply is deferred to add_ext_adv_params_complete(). */
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: answer right away with defaults. */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8900
add_ext_adv_data_complete(struct hci_dev * hdev,void * data,int err)8901 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8902 {
8903 struct mgmt_pending_cmd *cmd = data;
8904 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8905 struct mgmt_rp_add_advertising rp;
8906
8907 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8908
8909 memset(&rp, 0, sizeof(rp));
8910
8911 rp.instance = cp->instance;
8912
8913 if (err)
8914 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8915 mgmt_status(err));
8916 else
8917 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8918 mgmt_status(err), &rp, sizeof(rp));
8919
8920 mgmt_pending_free(cmd);
8921 }
8922
add_ext_adv_data_sync(struct hci_dev * hdev,void * data)8923 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8924 {
8925 struct mgmt_pending_cmd *cmd = data;
8926 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8927 int err;
8928
8929 if (ext_adv_capable(hdev)) {
8930 err = hci_update_adv_data_sync(hdev, cp->instance);
8931 if (err)
8932 return err;
8933
8934 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8935 if (err)
8936 return err;
8937
8938 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8939 }
8940
8941 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8942 }
8943
/* MGMT_OP_ADD_EXT_ADV_DATA handler: second half of the two-step
 * extended advertising registration. Attaches advertising and scan
 * response data to an instance previously created by
 * MGMT_OP_ADD_EXT_ADV_PARAMS, and schedules it when needed.
 *
 * On any validation or setup failure the half-registered instance is
 * removed again (clear_new_instance path).
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by a prior params call. */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data: adv data and scan response are concatenated
	 * in cp->data.
	 */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		/* A still-pending instance becomes official now. */
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9062
remove_advertising_complete(struct hci_dev * hdev,void * data,int err)9063 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9064 int err)
9065 {
9066 struct mgmt_pending_cmd *cmd = data;
9067 struct mgmt_cp_remove_advertising *cp = cmd->param;
9068 struct mgmt_rp_remove_advertising rp;
9069
9070 bt_dev_dbg(hdev, "err %d", err);
9071
9072 memset(&rp, 0, sizeof(rp));
9073 rp.instance = cp->instance;
9074
9075 if (err)
9076 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9077 mgmt_status(err));
9078 else
9079 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9080 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9081
9082 mgmt_pending_free(cmd);
9083 }
9084
remove_advertising_sync(struct hci_dev * hdev,void * data)9085 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9086 {
9087 struct mgmt_pending_cmd *cmd = data;
9088 struct mgmt_cp_remove_advertising *cp = cmd->param;
9089 int err;
9090
9091 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9092 if (err)
9093 return err;
9094
9095 if (list_empty(&hdev->adv_instances))
9096 err = hci_disable_advertising_sync(hdev);
9097
9098 return err;
9099 }
9100
/* MGMT_OP_REMOVE_ADVERTISING handler: validate the request under the
 * device lock and queue the actual removal; the reply is deferred to
 * remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must actually exist (zero means "all"). */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	/* Do not race with a pending Set LE operation. */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	/* Nothing to remove when no instances are registered at all. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

done:
	hci_dev_unlock(hdev);

	return err;
}
9148
/* MGMT_OP_GET_ADV_SIZE_INFO handler: report how many bytes of adv and
 * scan response data are available for an instance given the requested
 * flags. Purely informational; no state is changed.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	/* Instance numbers are 1-based and bounded by the controller. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	if (flags & ~get_supported_adv_flags(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
9184
/* Dispatch table for MGMT commands, indexed by opcode. Each entry lists
 * the handler, the (minimum) expected parameter size, and flags
 * indicating whether the command is variable length, needs no (or an
 * optional) controller index, is allowed for untrusted sockets, or
 * applies to unconfigured controllers.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,          MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,     MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9318
mgmt_index_added(struct hci_dev * hdev)9319 void mgmt_index_added(struct hci_dev *hdev)
9320 {
9321 struct mgmt_ev_ext_index ev;
9322
9323 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9324 return;
9325
9326 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9327 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9328 HCI_MGMT_UNCONF_INDEX_EVENTS);
9329 ev.type = 0x01;
9330 } else {
9331 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9332 HCI_MGMT_INDEX_EVENTS);
9333 ev.type = 0x00;
9334 }
9335
9336 ev.bus = hdev->bus;
9337
9338 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9339 HCI_MGMT_EXT_INDEX_EVENTS);
9340 }
9341
mgmt_index_removed(struct hci_dev * hdev)9342 void mgmt_index_removed(struct hci_dev *hdev)
9343 {
9344 struct mgmt_ev_ext_index ev;
9345 u8 status = MGMT_STATUS_INVALID_INDEX;
9346
9347 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9348 return;
9349
9350 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9351
9352 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9353 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9354 HCI_MGMT_UNCONF_INDEX_EVENTS);
9355 ev.type = 0x01;
9356 } else {
9357 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9358 HCI_MGMT_INDEX_EVENTS);
9359 ev.type = 0x00;
9360 }
9361
9362 ev.bus = hdev->bus;
9363
9364 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9365 HCI_MGMT_EXT_INDEX_EVENTS);
9366
9367 /* Cancel any remaining timed work */
9368 if (!hci_dev_test_flag(hdev, HCI_MGMT))
9369 return;
9370 cancel_delayed_work_sync(&hdev->discov_off);
9371 cancel_delayed_work_sync(&hdev->service_cache);
9372 cancel_delayed_work_sync(&hdev->rpa_expired);
9373 }
9374
mgmt_power_on(struct hci_dev * hdev,int err)9375 void mgmt_power_on(struct hci_dev *hdev, int err)
9376 {
9377 struct cmd_lookup match = { NULL, hdev };
9378
9379 bt_dev_dbg(hdev, "err %d", err);
9380
9381 hci_dev_lock(hdev);
9382
9383 if (!err) {
9384 restart_le_actions(hdev);
9385 hci_update_passive_scan(hdev);
9386 }
9387
9388 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9389
9390 new_settings(hdev, match.sk);
9391
9392 if (match.sk)
9393 sock_put(match.sk);
9394
9395 hci_dev_unlock(hdev);
9396 }
9397
__mgmt_power_off(struct hci_dev * hdev)9398 void __mgmt_power_off(struct hci_dev *hdev)
9399 {
9400 struct cmd_lookup match = { NULL, hdev };
9401 u8 status, zero_cod[] = { 0, 0, 0 };
9402
9403 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9404
9405 /* If the power off is because of hdev unregistration let
9406 * use the appropriate INVALID_INDEX status. Otherwise use
9407 * NOT_POWERED. We cover both scenarios here since later in
9408 * mgmt_index_removed() any hci_conn callbacks will have already
9409 * been triggered, potentially causing misleading DISCONNECTED
9410 * status responses.
9411 */
9412 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9413 status = MGMT_STATUS_INVALID_INDEX;
9414 else
9415 status = MGMT_STATUS_NOT_POWERED;
9416
9417 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9418
9419 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9420 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9421 zero_cod, sizeof(zero_cod),
9422 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9423 ext_info_changed(hdev, NULL);
9424 }
9425
9426 new_settings(hdev, match.sk);
9427
9428 if (match.sk)
9429 sock_put(match.sk);
9430 }
9431
mgmt_set_powered_failed(struct hci_dev * hdev,int err)9432 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9433 {
9434 struct mgmt_pending_cmd *cmd;
9435 u8 status;
9436
9437 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9438 if (!cmd)
9439 return;
9440
9441 if (err == -ERFKILL)
9442 status = MGMT_STATUS_RFKILLED;
9443 else
9444 status = MGMT_STATUS_FAILED;
9445
9446 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9447
9448 mgmt_pending_remove(cmd);
9449 }
9450
/* Emit a New Link Key event for a BR/EDR link key, with a store hint
 * telling userspace whether it should persist the key.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev = {
		.store_hint	= persistent,
		.key.addr.type	= BDADDR_BREDR,
		.key.type	= key->type,
		.key.pin_len	= key->pin_len,
	};

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
9467
mgmt_ltk_type(struct smp_ltk * ltk)9468 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9469 {
9470 switch (ltk->type) {
9471 case SMP_LTK:
9472 case SMP_LTK_RESPONDER:
9473 if (ltk->authenticated)
9474 return MGMT_LTK_AUTHENTICATED;
9475 return MGMT_LTK_UNAUTHENTICATED;
9476 case SMP_LTK_P256:
9477 if (ltk->authenticated)
9478 return MGMT_LTK_P256_AUTH;
9479 return MGMT_LTK_P256_UNAUTH;
9480 case SMP_LTK_P256_DEBUG:
9481 return MGMT_LTK_P256_DEBUG;
9482 }
9483
9484 return MGMT_LTK_UNAUTHENTICATED;
9485 }
9486
/* Emit a New Long Term Key event, advising userspace whether the key
 * is worth storing.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;
	bool identity_addr;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	identity_addr = !(key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
			  (key->bdaddr.b[5] & 0xc0) != 0xc0);

	ev.store_hint = identity_addr ? persistent : 0x00;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9529
/* Emit a New IRK event carrying the remote's identity resolving key
 * together with the RPA it was received under.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev = { .store_hint = persistent };

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
9545
/* Emit a New CSRK event for a signature resolving key. */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* CSRKs from resolvable or non-resolvable random addresses
	 * without an identity resolving key cannot be matched again
	 * once the address rotates, so never ask userspace to store
	 * those. Public and static random (identity) addresses are
	 * fine.
	 */
	if (csrk->bdaddr_type != ADDR_LE_DEV_RANDOM ||
	    (csrk->bdaddr.b[5] & 0xc0) == 0xc0)
		ev.store_hint = persistent;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9575
/* Emit a New Connection Parameter event so userspace can persist the
 * remote's preferred connection parameters.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	/* Parameters for non-identity addresses cannot be stored, as the
	 * address will change; don't bother reporting them.
	 */
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	ev = (struct mgmt_ev_new_conn_param) {
		.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type),
		.store_hint = store_hint,
		.min_interval = cpu_to_le16(min_interval),
		.max_interval = cpu_to_le16(max_interval),
		.latency = cpu_to_le16(latency),
		.timeout = cpu_to_le16(timeout),
	};
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9596
/* Emit a Device Connected event exactly once per connection (guarded by
 * the HCI_CONN_MGMT_CONNECTED flag). For LE the remote's advertising
 * data is attached; for BR/EDR the name and class of device are encoded
 * as EIR fields instead.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* allocate buff for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	/* mgmt_alloc_skb() may fail; skb_put() on a NULL skb would oops.
	 * Other callers (e.g. mgmt_device_found) already check for this.
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}
9646
unpair_device_rsp(struct mgmt_pending_cmd * cmd,void * data)9647 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9648 {
9649 struct hci_dev *hdev = data;
9650 struct mgmt_cp_unpair_device *cp = cmd->param;
9651
9652 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9653
9654 cmd->cmd_complete(cmd, 0);
9655 mgmt_pending_remove(cmd);
9656 }
9657
mgmt_powering_down(struct hci_dev * hdev)9658 bool mgmt_powering_down(struct hci_dev *hdev)
9659 {
9660 struct mgmt_pending_cmd *cmd;
9661 struct mgmt_mode *cp;
9662
9663 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9664 if (!cmd)
9665 return false;
9666
9667 cp = cmd->param;
9668 if (!cp->val)
9669 return true;
9670
9671 return false;
9672 }
9673
/* Emit a Device Disconnected event for an ACL or LE link that was
 * previously announced via Device Connected.
 *
 * Cleanup: the old local 'sk' was always NULL, making the trailing
 * sock_put() unreachable dead code — removed; the event is simply sent
 * with no socket to skip.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;

	/* Only report links that were announced as connected */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), NULL);
}
9700
/* Handle a failed HCI Disconnect: flush pending Unpair Device commands
 * and complete a matching pending Disconnect command, if any.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_disconnect *cp;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	/* The pending command must target the same address and type */
	cp = cmd->param;
	if (bacmp(bdaddr, &cp->addr.bdaddr) ||
	    cp->addr.type != link_to_bdaddr(link_type, addr_type))
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9726
/* Emit a Connect Failed event with the HCI status mapped to mgmt. */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev = {
		.status = mgmt_status(status),
	};

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9738
/* Ask userspace for a PIN code; secure indicates a 16-digit PIN. */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev = {
		.addr.type = BDADDR_BREDR,
		.secure = secure,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
9749
/* Complete a pending PIN Code Reply command with the HCI status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_REPLY,
						    hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}
}
9762
/* Complete a pending PIN Code Negative Reply command. */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY,
						    hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}
}
9775
/* Ask userspace to confirm a numeric-comparison pairing value. */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev = {
		.confirm_hint = confirm_hint,
		.value = cpu_to_le32(value),
	};

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9792
/* Ask userspace to enter a passkey for the given remote address. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9806
/* Shared completion for all user confirm/passkey (neg) reply commands:
 * finish the pending command of the given opcode, if one exists.
 * Returns 0 on success, -ENOENT when no matching command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
9822
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9829
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9837
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9844
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9852
/* Notify userspace of the passkey to display; entered counts the
 * digits the remote side has typed so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev = {
		.passkey = __cpu_to_le32(passkey),
		.entered = entered,
	};

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
9868
/* Report an authentication failure; if a Pair Device command was
 * pending for this connection, complete it (skipping its socket when
 * broadcasting the event) and drop it.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);
	u8 status = mgmt_status(hci_status);
	struct mgmt_ev_auth_failed ev = { .status = status };

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9889
/* Called when enabling/disabling link-level authentication completes.
 * On failure, answers all pending Set Link Security commands with the
 * mapped error; on success, syncs the HCI_LINK_SECURITY flag with the
 * controller's HCI_AUTH state and emits New Settings if it changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's auth state into the dev flag; 'changed'
	 * is true only if the flag actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	/* settings_rsp also captures the first command's socket in match */
	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9916
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)9917 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9918 {
9919 struct cmd_lookup *match = data;
9920
9921 if (match->sk == NULL) {
9922 match->sk = cmd->sk;
9923 sock_hold(match->sk);
9924 }
9925 }
9926
/* Called when a class-of-device update completes; broadcasts the new
 * class (on success) while skipping no one, and releases the socket of
 * any pending class-affecting command.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	static const u16 class_ops[] = {
		MGMT_OP_SET_DEV_CLASS,
		MGMT_OP_ADD_UUID,
		MGMT_OP_REMOVE_UUID,
	};
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
	int i;

	/* Capture the socket of the first pending command of each kind */
	for (i = 0; i < ARRAY_SIZE(class_ops); i++)
		mgmt_pending_foreach(class_ops[i], hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9945
/* Called when a local name update completes. A non-zero status means
 * the HCI command failed and nothing is reported.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	/* Build the event from the new name plus the stored short name */
	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No userspace request pending, so the name change came
		 * from elsewhere; cache it in hdev->dev_name.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the requesting socket (if any); it gets a command reply */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9973
has_uuid(u8 * uuid,u16 uuid_count,u8 (* uuids)[16])9974 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9975 {
9976 int i;
9977
9978 for (i = 0; i < uuid_count; i++) {
9979 if (!memcmp(uuid, uuids[i], 16))
9980 return true;
9981 }
9982
9983 return false;
9984 }
9985
/* Walk the EIR/advertising TLV data and return true if any contained
 * 16/32/128-bit service UUID, expanded to 128 bits via the Bluetooth
 * base UUID, matches one of the filter uuids.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Make sure the full field (length byte + field_len bytes,
		 * which includes the type byte read below) is in bounds.
		 */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 2-byte UUIDs, little endian, starting at eir[2];
			 * splice each into bytes 12-13 of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 4-byte UUIDs go into bytes 12-15 of the base UUID */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs, compared verbatim */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + payload) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10040
/* Schedule a delayed LE scan restart so controllers with strict
 * duplicate filtering report fresh (updated-RSSI) results.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* NOTE(review): skips the restart when the restart delay would
	 * land past the end of the current scan window (scan_start +
	 * scan_duration) — presumably the scan cycle ends anyway before
	 * the restart could take effect; confirm intent.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
10055
/* Apply the service-discovery filter (RSSI threshold and UUID list) to
 * a discovery result. Returns true if the result should be reported.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10100
/* Report that a device previously matched by an Advertisement Monitor
 * is no longer being seen.
 */
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev = {
		.monitor_handle = cpu_to_le16(handle),
		.addr.type = addr_type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}
10113
/* Re-emit a DEVICE_FOUND skb as an ADV_MONITOR_DEVICE_FOUND event by
 * prefixing the matched monitor handle; the original skb is left
 * untouched (a fresh event skb is allocated and sent).
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *ev_skb;
	size_t ev_len;
	__le16 *ev_handle;

	if (!skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is DEVICE_FOUND plus a leading
	 * monitor_handle field, so size the new event accordingly.
	 */
	ev_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
		  sizeof(struct mgmt_ev_device_found)) + skb->len;
	ev_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				ev_len);
	if (!ev_skb)
		return;

	ev_handle = skb_put(ev_skb, sizeof(*ev_handle));
	*ev_handle = cpu_to_le16(handle);
	skb_put_data(ev_skb, skb->data, skb->len);

	mgmt_event_skb(ev_skb, skip_sk);
}
10143
/* Route an advertisement report to DEVICE_FOUND and/or
 * ADV_MONITOR_DEVICE_FOUND events depending on why it was received.
 *
 * Ownership: this function consumes skb on every path — it is either
 * handed to mgmt_event_skb() or freed here; callers must not touch it
 * afterwards. mgmt_send_adv_monitor_device_found() only copies it.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below: set again if any monitored device is still
	 * awaiting its one-shot notification.
	 */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* Notify each monitored device at most once */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Finally either forward or drop the original skb */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10207
/* Emit a Mesh Device Found event for an LE advertisement, but only if
 * the advertisement (or scan response) carries at least one of the AD
 * types userspace registered interest in via hdev->mesh_ad_types.
 * An empty mesh_ad_types list (first entry 0) accepts everything.
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* i walks the TLV fields: eir[i] is the length byte,
		 * eir[i + 1] the AD type of the field.
		 */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* A zero entry terminates the list */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: silently drop the report */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10273
/* Process a discovery result (inquiry result or LE advertisement),
 * apply the active discovery filters, and emit DEVICE_FOUND and/or
 * Advertisement Monitor events. Also feeds LE reports to the mesh
 * path when HCI_MESH is set.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Append a synthesized Class of Device EIR field unless the EIR
	 * data already carries one.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Hands off ownership of skb (forwarded or freed there) */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10365
/* Report a remote name (or a failed name request, via the
 * NAME_REQUEST_FAILED flag) as a DEVICE_FOUND event.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
	/* mgmt_alloc_skb() may fail; skb_put() on a NULL skb would oops.
	 * Other callers (e.g. mgmt_device_found) already check for this.
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}
10392
/* Broadcast the current discovery state (start/stop) to userspace. */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev = {
		.type = hdev->discovery.type,
		.discovering = discovering,
	};

	bt_dev_dbg(hdev, "discovering %u", discovering);

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
10405
/* Notify userspace that the controller is entering suspend. */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev = { .suspend_state = state };

	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
10413
/* Notify userspace that the controller resumed; bdaddr (if any) is the
 * remote device that woke us, otherwise the address is zeroed.
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	memset(&ev, 0, sizeof(ev));
	ev.wake_reason = reason;

	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
10429
/* Control-channel registration: routes commands arriving on
 * HCI_CHANNEL_CONTROL to the mgmt_handlers table.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10436
/* Register the MGMT control channel with the HCI core. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10441
/* Unregister the MGMT control channel on shutdown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10446
mgmt_cleanup(struct sock * sk)10447 void mgmt_cleanup(struct sock *sk)
10448 {
10449 struct mgmt_mesh_tx *mesh_tx;
10450 struct hci_dev *hdev;
10451
10452 read_lock(&hci_dev_list_lock);
10453
10454 list_for_each_entry(hdev, &hci_dev_list, list) {
10455 do {
10456 mesh_tx = mgmt_mesh_next(hdev, sk);
10457
10458 if (mesh_tx)
10459 mesh_send_complete(hdev, mesh_tx, true);
10460 } while (mesh_tx);
10461 }
10462
10463 read_unlock(&hci_dev_list_lock);
10464 }
10465