/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	22

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

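/* Map a negative errno returned by the HCI/cmd_sync layer onto the
 * closest MGMT status code; anything unrecognized falls back to
 * MGMT_STATUS_FAILED.
 */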
static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

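/* Convert an error into an MGMT status code: negative values are
 * treated as errnos, non-negative values as HCI status codes used to
 * index the conversion table above.
 */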
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

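/* Translate an MGMT address type from the wire format into the LE
 * address type used internally by the HCI core.
 */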
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

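/* Reply with the supported command and event lists. Trusted sockets
 * receive the full lists, untrusted sockets only the read-only subsets
 * defined above.
 */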
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

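/* Return the indexes of all configured controllers. Controllers that
 * are still in setup or config state, bound to the user channel, or
 * marked raw-only are skipped.
 */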
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

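/* A controller counts as configured once any required external
 * configuration has completed and, where the quirks demand it, a valid
 * public address has been set.
 */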
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

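/* Build the bitmask of PHYs the controller supports, derived from the
 * BR/EDR LMP feature bits and the LE feature page.
 */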
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

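/* Build the bitmask of settings this controller could support,
 * regardless of whether they are currently enabled.
 */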
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

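/* Build the bitmask of settings that are currently active, derived
 * from the hdev flags.
 */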
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * it is actually in use decides if the flag is set.
	 *
	 * For single-mode LE-only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address is
	 * evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

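/* Perform one-time mgmt setup for a controller: initialize the delayed
 * work used by mgmt and mark the device as mgmt-controlled. Guarded by
 * the HCI_MGMT flag so it runs only once per controller.
 */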
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set implicitly
	 * so that pairing works for them; for mgmt, however, we
	 * require user-space to explicitly enable it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

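/* Cancel a pending advertising-instance timeout and clear the stored
 * timeout value.
 */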
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}

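/* Handle MGMT_OP_SET_DISCOVERABLE: validate the requested mode and
 * timeout (0x02 selects limited discoverable, which requires a
 * timeout), update the flags and queue the HCI update. The timeout is
 * armed in the complete handler once the HCI update succeeds.
 */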
set_discoverable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1547 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1548 u16 len)
1549 {
1550 struct mgmt_cp_set_discoverable *cp = data;
1551 struct mgmt_pending_cmd *cmd;
1552 u16 timeout;
1553 int err;
1554
1555 bt_dev_dbg(hdev, "sock %p", sk);
1556
1557 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1558 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1559 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1560 MGMT_STATUS_REJECTED);
1561
1562 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1563 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1564 MGMT_STATUS_INVALID_PARAMS);
1565
1566 timeout = __le16_to_cpu(cp->timeout);
1567
1568 /* Disabling discoverable requires that no timeout is set,
1569 * and enabling limited discoverable requires a timeout.
1570 */
1571 if ((cp->val == 0x00 && timeout > 0) ||
1572 (cp->val == 0x02 && timeout == 0))
1573 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1574 MGMT_STATUS_INVALID_PARAMS);
1575
1576 hci_dev_lock(hdev);
1577
1578 if (!hdev_is_powered(hdev) && timeout > 0) {
1579 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1580 MGMT_STATUS_NOT_POWERED);
1581 goto failed;
1582 }
1583
1584 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1585 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1586 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1587 MGMT_STATUS_BUSY);
1588 goto failed;
1589 }
1590
1591 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1592 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1593 MGMT_STATUS_REJECTED);
1594 goto failed;
1595 }
1596
1597 if (hdev->advertising_paused) {
1598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1599 MGMT_STATUS_BUSY);
1600 goto failed;
1601 }
1602
1603 if (!hdev_is_powered(hdev)) {
1604 bool changed = false;
1605
1606 /* Setting limited discoverable when powered off is
1607 * not a valid operation since it requires a timeout
1608 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1609 */
1610 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1611 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1612 changed = true;
1613 }
1614
1615 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1616 if (err < 0)
1617 goto failed;
1618
1619 if (changed)
1620 err = new_settings(hdev, sk);
1621
1622 goto failed;
1623 }
1624
1625 /* If the current mode is the same, then just update the timeout
1626 * value with the new value. And if only the timeout gets updated,
1627 * then no need for any HCI transactions.
1628 */
1629 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1630 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1631 HCI_LIMITED_DISCOVERABLE)) {
1632 cancel_delayed_work(&hdev->discov_off);
1633 hdev->discov_timeout = timeout;
1634
1635 if (cp->val && hdev->discov_timeout > 0) {
1636 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1637 queue_delayed_work(hdev->req_workqueue,
1638 &hdev->discov_off, to);
1639 }
1640
1641 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1642 goto failed;
1643 }
1644
1645 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1646 if (!cmd) {
1647 err = -ENOMEM;
1648 goto failed;
1649 }
1650
1651 /* Cancel any potential discoverable timeout that might be
1652 * still active and store new timeout value. The arming of
1653 * the timeout happens in the complete handler.
1654 */
1655 cancel_delayed_work(&hdev->discov_off);
1656 hdev->discov_timeout = timeout;
1657
1658 if (cp->val)
1659 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1660 else
1661 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1662
1663 /* Limited discoverable mode */
1664 if (cp->val == 0x02)
1665 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1666 else
1667 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1668
1669 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1670 mgmt_set_discoverable_complete);
1671
1672 if (err < 0)
1673 mgmt_pending_remove(cmd);
1674
1675 failed:
1676 hci_dev_unlock(hdev);
1677 return err;
1678 }
1679
mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1680 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1681 int err)
1682 {
1683 struct mgmt_pending_cmd *cmd = data;
1684
1685 bt_dev_dbg(hdev, "err %d", err);
1686
1687 /* Make sure cmd still outstanding. */
1688 if (err == -ECANCELED ||
1689 cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1690 return;
1691
1692 hci_dev_lock(hdev);
1693
1694 if (err) {
1695 u8 mgmt_err = mgmt_status(err);
1696 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1697 goto done;
1698 }
1699
1700 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1701 new_settings(hdev, cmd->sk);
1702
1703 done:
1704 if (cmd)
1705 mgmt_pending_remove(cmd);
1706
1707 hci_dev_unlock(hdev);
1708 }
1709
set_connectable_update_settings(struct hci_dev * hdev,struct sock * sk,u8 val)1710 static int set_connectable_update_settings(struct hci_dev *hdev,
1711 struct sock *sk, u8 val)
1712 {
1713 bool changed = false;
1714 int err;
1715
1716 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1717 changed = true;
1718
1719 if (val) {
1720 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1721 } else {
1722 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1723 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1724 }
1725
1726 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1727 if (err < 0)
1728 return err;
1729
1730 if (changed) {
1731 hci_update_scan(hdev);
1732 hci_update_passive_scan(hdev);
1733 return new_settings(hdev, sk);
1734 }
1735
1736 return 0;
1737 }
1738
set_connectable_sync(struct hci_dev * hdev,void * data)1739 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1740 {
1741 BT_DBG("%s", hdev->name);
1742
1743 return hci_update_connectable_sync(hdev);
1744 }
1745
set_connectable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1746 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1747 u16 len)
1748 {
1749 struct mgmt_mode *cp = data;
1750 struct mgmt_pending_cmd *cmd;
1751 int err;
1752
1753 bt_dev_dbg(hdev, "sock %p", sk);
1754
1755 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1756 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1757 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1758 MGMT_STATUS_REJECTED);
1759
1760 if (cp->val != 0x00 && cp->val != 0x01)
1761 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1762 MGMT_STATUS_INVALID_PARAMS);
1763
1764 hci_dev_lock(hdev);
1765
1766 if (!hdev_is_powered(hdev)) {
1767 err = set_connectable_update_settings(hdev, sk, cp->val);
1768 goto failed;
1769 }
1770
1771 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1772 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1773 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1774 MGMT_STATUS_BUSY);
1775 goto failed;
1776 }
1777
1778 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1779 if (!cmd) {
1780 err = -ENOMEM;
1781 goto failed;
1782 }
1783
1784 if (cp->val) {
1785 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1786 } else {
1787 if (hdev->discov_timeout > 0)
1788 cancel_delayed_work(&hdev->discov_off);
1789
1790 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1791 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1792 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1793 }
1794
1795 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1796 mgmt_set_connectable_complete);
1797
1798 if (err < 0)
1799 mgmt_pending_remove(cmd);
1800
1801 failed:
1802 hci_dev_unlock(hdev);
1803 return err;
1804 }
1805
set_bondable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1806 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1807 u16 len)
1808 {
1809 struct mgmt_mode *cp = data;
1810 bool changed;
1811 int err;
1812
1813 bt_dev_dbg(hdev, "sock %p", sk);
1814
1815 if (cp->val != 0x00 && cp->val != 0x01)
1816 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1817 MGMT_STATUS_INVALID_PARAMS);
1818
1819 hci_dev_lock(hdev);
1820
1821 if (cp->val)
1822 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1823 else
1824 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1825
1826 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1827 if (err < 0)
1828 goto unlock;
1829
1830 if (changed) {
1831 /* In limited privacy mode the change of bondable mode
1832 * may affect the local advertising address.
1833 */
1834 hci_update_discoverable(hdev);
1835
1836 err = new_settings(hdev, sk);
1837 }
1838
1839 unlock:
1840 hci_dev_unlock(hdev);
1841 return err;
1842 }
1843
1844 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1845 u16 len)
1846 {
1847 struct mgmt_mode *cp = data;
1848 struct mgmt_pending_cmd *cmd;
1849 u8 val, status;
1850 int err;
1851
1852 bt_dev_dbg(hdev, "sock %p", sk);
1853
1854 status = mgmt_bredr_support(hdev);
1855 if (status)
1856 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1857 status);
1858
1859 if (cp->val != 0x00 && cp->val != 0x01)
1860 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1861 MGMT_STATUS_INVALID_PARAMS);
1862
1863 hci_dev_lock(hdev);
1864
1865 if (!hdev_is_powered(hdev)) {
1866 bool changed = false;
1867
1868 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1869 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1870 changed = true;
1871 }
1872
1873 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1874 if (err < 0)
1875 goto failed;
1876
1877 if (changed)
1878 err = new_settings(hdev, sk);
1879
1880 goto failed;
1881 }
1882
1883 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1884 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1885 MGMT_STATUS_BUSY);
1886 goto failed;
1887 }
1888
1889 val = !!cp->val;
1890
1891 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1892 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1893 goto failed;
1894 }
1895
1896 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1897 if (!cmd) {
1898 err = -ENOMEM;
1899 goto failed;
1900 }
1901
1902 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1903 if (err < 0) {
1904 mgmt_pending_remove(cmd);
1905 goto failed;
1906 }
1907
1908 failed:
1909 hci_dev_unlock(hdev);
1910 return err;
1911 }
1912
1913 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1914 {
1915 struct cmd_lookup match = { NULL, hdev };
1916 struct mgmt_pending_cmd *cmd = data;
1917 struct mgmt_mode *cp = cmd->param;
1918 u8 enable = cp->val;
1919 bool changed;
1920
1921 /* Make sure cmd still outstanding. */
1922 if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1923 return;
1924
1925 if (err) {
1926 u8 mgmt_err = mgmt_status(err);
1927
1928 if (enable && hci_dev_test_and_clear_flag(hdev,
1929 HCI_SSP_ENABLED)) {
1930 new_settings(hdev, NULL);
1931 }
1932
1933 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1934 &mgmt_err);
1935 return;
1936 }
1937
1938 if (enable) {
1939 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1940 } else {
1941 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1942 }
1943
1944 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1945
1946 if (changed)
1947 new_settings(hdev, match.sk);
1948
1949 if (match.sk)
1950 sock_put(match.sk);
1951
1952 hci_update_eir_sync(hdev);
1953 }
1954
1955 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1956 {
1957 struct mgmt_pending_cmd *cmd = data;
1958 struct mgmt_mode *cp = cmd->param;
1959 bool changed = false;
1960 int err;
1961
1962 if (cp->val)
1963 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1964
1965 err = hci_write_ssp_mode_sync(hdev, cp->val);
1966
1967 if (!err && changed)
1968 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1969
1970 return err;
1971 }
1972
1973 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1974 {
1975 struct mgmt_mode *cp = data;
1976 struct mgmt_pending_cmd *cmd;
1977 u8 status;
1978 int err;
1979
1980 bt_dev_dbg(hdev, "sock %p", sk);
1981
1982 status = mgmt_bredr_support(hdev);
1983 if (status)
1984 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1985
1986 if (!lmp_ssp_capable(hdev))
1987 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1988 MGMT_STATUS_NOT_SUPPORTED);
1989
1990 if (cp->val != 0x00 && cp->val != 0x01)
1991 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1992 MGMT_STATUS_INVALID_PARAMS);
1993
1994 hci_dev_lock(hdev);
1995
1996 if (!hdev_is_powered(hdev)) {
1997 bool changed;
1998
1999 if (cp->val) {
2000 changed = !hci_dev_test_and_set_flag(hdev,
2001 HCI_SSP_ENABLED);
2002 } else {
2003 changed = hci_dev_test_and_clear_flag(hdev,
2004 HCI_SSP_ENABLED);
2005 }
2006
2007 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2008 if (err < 0)
2009 goto failed;
2010
2011 if (changed)
2012 err = new_settings(hdev, sk);
2013
2014 goto failed;
2015 }
2016
2017 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2018 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2019 MGMT_STATUS_BUSY);
2020 goto failed;
2021 }
2022
2023 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2024 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2025 goto failed;
2026 }
2027
2028 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2029 if (!cmd)
2030 err = -ENOMEM;
2031 else
2032 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2033 set_ssp_complete);
2034
2035 if (err < 0) {
2036 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2037 MGMT_STATUS_FAILED);
2038
2039 if (cmd)
2040 mgmt_pending_remove(cmd);
2041 }
2042
2043 failed:
2044 hci_dev_unlock(hdev);
2045 return err;
2046 }
2047
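/* BR/EDR High Speed (AMP) support has been removed from the kernel, so
 * MGMT_OP_SET_HS is kept only to reject the request as not supported.
 */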
2048 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2049 {
2050 bt_dev_dbg(hdev, "sock %p", sk);
2051
2052 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2053 MGMT_STATUS_NOT_SUPPORTED);
2054 }
2055
2056 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2057 {
2058 struct cmd_lookup match = { NULL, hdev };
2059 u8 status = mgmt_status(err);
2060
2061 bt_dev_dbg(hdev, "err %d", err);
2062
2063 if (status) {
2064 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2065 &status);
2066 return;
2067 }
2068
2069 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2070
2071 new_settings(hdev, match.sk);
2072
2073 if (match.sk)
2074 sock_put(match.sk);
2075 }
2076
2077 static int set_le_sync(struct hci_dev *hdev, void *data)
2078 {
2079 struct mgmt_pending_cmd *cmd = data;
2080 struct mgmt_mode *cp = cmd->param;
2081 u8 val = !!cp->val;
2082 int err;
2083
2084 if (!val) {
2085 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2086
2087 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2088 hci_disable_advertising_sync(hdev);
2089
2090 if (ext_adv_capable(hdev))
2091 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2092 } else {
2093 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2094 }
2095
2096 err = hci_write_le_host_supported_sync(hdev, val, 0);
2097
2098 /* Make sure the controller has a good default for
2099 * advertising data. Restrict the update to when LE
2100 * has actually been enabled. During power on, the
2101 * update in powered_update_hci will take care of it.
2102 */
2103 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2104 if (ext_adv_capable(hdev)) {
2105 int status;
2106
2107 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2108 if (!status)
2109 hci_update_scan_rsp_data_sync(hdev, 0x00);
2110 } else {
2111 hci_update_adv_data_sync(hdev, 0x00);
2112 hci_update_scan_rsp_data_sync(hdev, 0x00);
2113 }
2114
2115 hci_update_passive_scan(hdev);
2116 }
2117
2118 return err;
2119 }
2120
2121 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2122 {
2123 struct mgmt_pending_cmd *cmd = data;
2124 u8 status = mgmt_status(err);
2125 struct sock *sk = cmd->sk;
2126
2127 if (status) {
2128 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2129 cmd_status_rsp, &status);
2130 return;
2131 }
2132
2133 mgmt_pending_remove(cmd);
2134 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2135 }
2136
2137 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2138 {
2139 struct mgmt_pending_cmd *cmd = data;
2140 struct mgmt_cp_set_mesh *cp = cmd->param;
2141 size_t len = cmd->param_len;
2142
2143 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2144
2145 if (cp->enable)
2146 hci_dev_set_flag(hdev, HCI_MESH);
2147 else
2148 hci_dev_clear_flag(hdev, HCI_MESH);
2149
2150 len -= sizeof(*cp);
2151
2152 	/* If the filters don't fit, leave them unset and forward all adv pkts */
2153 if (len <= sizeof(hdev->mesh_ad_types))
2154 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2155
2156 hci_update_passive_scan_sync(hdev);
2157 return 0;
2158 }
2159
2160 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2161 {
2162 struct mgmt_cp_set_mesh *cp = data;
2163 struct mgmt_pending_cmd *cmd;
2164 int err = 0;
2165
2166 bt_dev_dbg(hdev, "sock %p", sk);
2167
2168 if (!lmp_le_capable(hdev) ||
2169 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2170 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2171 MGMT_STATUS_NOT_SUPPORTED);
2172
2173 if (cp->enable != 0x00 && cp->enable != 0x01)
2174 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2175 MGMT_STATUS_INVALID_PARAMS);
2176
2177 hci_dev_lock(hdev);
2178
2179 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2180 if (!cmd)
2181 err = -ENOMEM;
2182 else
2183 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2184 set_mesh_complete);
2185
2186 if (err < 0) {
2187 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2188 MGMT_STATUS_FAILED);
2189
2190 if (cmd)
2191 mgmt_pending_remove(cmd);
2192 }
2193
2194 hci_dev_unlock(hdev);
2195 return err;
2196 }
2197
2198 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2199 {
2200 struct mgmt_mesh_tx *mesh_tx = data;
2201 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2202 unsigned long mesh_send_interval;
2203 u8 mgmt_err = mgmt_status(err);
2204
2205 /* Report any errors here, but don't report completion */
2206
2207 if (mgmt_err) {
2208 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2209 /* Send Complete Error Code for handle */
2210 mesh_send_complete(hdev, mesh_tx, false);
2211 return;
2212 }
2213
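	/* Pace the send-done work at roughly 25 ms per requested
	 * transmission, giving all cnt repetitions time to go out.
	 */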
2214 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2215 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2216 mesh_send_interval);
2217 }
2218
2219 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2220 {
2221 struct mgmt_mesh_tx *mesh_tx = data;
2222 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2223 struct adv_info *adv, *next_instance;
2224 u8 instance = hdev->le_num_of_adv_sets + 1;
2225 u16 timeout, duration;
2226 int err = 0;
2227
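	/* Mesh TX claims the instance number just past the controller's
	 * advertising sets; if all sets are already in use, report busy
	 * rather than evicting a configured instance.
	 */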
2228 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2229 return MGMT_STATUS_BUSY;
2230
2231 timeout = 1000;
2232 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2233 adv = hci_add_adv_instance(hdev, instance, 0,
2234 send->adv_data_len, send->adv_data,
2235 0, NULL,
2236 timeout, duration,
2237 HCI_ADV_TX_POWER_NO_PREFERENCE,
2238 hdev->le_adv_min_interval,
2239 hdev->le_adv_max_interval,
2240 mesh_tx->handle);
2241
2242 if (!IS_ERR(adv))
2243 mesh_tx->instance = instance;
2244 else
2245 err = PTR_ERR(adv);
2246
2247 if (hdev->cur_adv_instance == instance) {
2248 /* If the currently advertised instance is being changed then
2249 * cancel the current advertising and schedule the next
2250 * instance. If there is only one instance then the overridden
2251 * advertising data will be visible right away.
2252 */
2253 cancel_adv_timeout(hdev);
2254
2255 next_instance = hci_get_next_instance(hdev, instance);
2256 if (next_instance)
2257 instance = next_instance->instance;
2258 else
2259 instance = 0;
2260 } else if (hdev->adv_instance_timeout) {
2261 		/* Advertise the new instance immediately if no other is active,
2262 		 * or let the queue schedule it if advertising is already running
2263 		 */
2264 instance = 0;
2265 }
2266
2267 if (instance)
2268 return hci_schedule_adv_instance_sync(hdev, instance, true);
2269
2270 return err;
2271 }
2272
2273 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2274 {
2275 struct mgmt_rp_mesh_read_features *rp = data;
2276
2277 if (rp->used_handles >= rp->max_handles)
2278 return;
2279
2280 rp->handles[rp->used_handles++] = mesh_tx->handle;
2281 }
2282
2283 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2284 void *data, u16 len)
2285 {
2286 struct mgmt_rp_mesh_read_features rp;
2287
2288 if (!lmp_le_capable(hdev) ||
2289 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2290 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2291 MGMT_STATUS_NOT_SUPPORTED);
2292
2293 memset(&rp, 0, sizeof(rp));
2294 rp.index = cpu_to_le16(hdev->id);
2295 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2296 rp.max_handles = MESH_HANDLES_MAX;
2297
2298 hci_dev_lock(hdev);
2299
2300 if (rp.max_handles)
2301 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2302
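	/* Trim the reply to the used handle slots: handles are one byte
	 * each, so drop the unused tail of the handles array.
	 */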
2303 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2304 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2305
2306 hci_dev_unlock(hdev);
2307 return 0;
2308 }
2309
2310 static int send_cancel(struct hci_dev *hdev, void *data)
2311 {
2312 struct mgmt_pending_cmd *cmd = data;
2313 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2314 struct mgmt_mesh_tx *mesh_tx;
2315
2316 if (!cancel->handle) {
2317 do {
2318 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2319
2320 if (mesh_tx)
2321 mesh_send_complete(hdev, mesh_tx, false);
2322 } while (mesh_tx);
2323 } else {
2324 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2325
2326 if (mesh_tx && mesh_tx->sk == cmd->sk)
2327 mesh_send_complete(hdev, mesh_tx, false);
2328 }
2329
2330 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2331 0, NULL, 0);
2332 mgmt_pending_free(cmd);
2333
2334 return 0;
2335 }
2336
2337 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2338 void *data, u16 len)
2339 {
2340 struct mgmt_pending_cmd *cmd;
2341 int err;
2342
2343 if (!lmp_le_capable(hdev) ||
2344 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2345 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2346 MGMT_STATUS_NOT_SUPPORTED);
2347
2348 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2349 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2350 MGMT_STATUS_REJECTED);
2351
2352 hci_dev_lock(hdev);
2353 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2354 if (!cmd)
2355 err = -ENOMEM;
2356 else
2357 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2358
2359 if (err < 0) {
2360 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2361 MGMT_STATUS_FAILED);
2362
2363 if (cmd)
2364 mgmt_pending_free(cmd);
2365 }
2366
2367 hci_dev_unlock(hdev);
2368 return err;
2369 }
2370
2371 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2372 {
2373 struct mgmt_mesh_tx *mesh_tx;
2374 struct mgmt_cp_mesh_send *send = data;
2375 struct mgmt_rp_mesh_read_features rp;
2376 bool sending;
2377 int err = 0;
2378
2379 if (!lmp_le_capable(hdev) ||
2380 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2381 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2382 MGMT_STATUS_NOT_SUPPORTED);
2383 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2384 len <= MGMT_MESH_SEND_SIZE ||
2385 len > (MGMT_MESH_SEND_SIZE + 31))
2386 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2387 MGMT_STATUS_REJECTED);
2388
2389 hci_dev_lock(hdev);
2390
2391 memset(&rp, 0, sizeof(rp));
2392 rp.max_handles = MESH_HANDLES_MAX;
2393
2394 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2395
2396 if (rp.max_handles <= rp.used_handles) {
2397 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2398 MGMT_STATUS_BUSY);
2399 goto done;
2400 }
2401
2402 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2403 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2404
2405 if (!mesh_tx)
2406 err = -ENOMEM;
2407 else if (!sending)
2408 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2409 mesh_send_start_complete);
2410
2411 if (err < 0) {
2412 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2413 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2414 MGMT_STATUS_FAILED);
2415
2416 if (mesh_tx) {
2417 if (sending)
2418 mgmt_mesh_remove(mesh_tx);
2419 }
2420 } else {
2421 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2422
2423 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2424 &mesh_tx->handle, 1);
2425 }
2426
2427 done:
2428 hci_dev_unlock(hdev);
2429 return err;
2430 }
2431
2432 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2433 {
2434 struct mgmt_mode *cp = data;
2435 struct mgmt_pending_cmd *cmd;
2436 int err;
2437 u8 val, enabled;
2438
2439 bt_dev_dbg(hdev, "sock %p", sk);
2440
2441 if (!lmp_le_capable(hdev))
2442 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2443 MGMT_STATUS_NOT_SUPPORTED);
2444
2445 if (cp->val != 0x00 && cp->val != 0x01)
2446 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2447 MGMT_STATUS_INVALID_PARAMS);
2448
2449 	/* Bluetooth single-mode LE-only controllers, or dual-mode
2450 	 * controllers configured as LE-only devices, do not allow
2451 	 * switching LE off. These have either LE enabled explicitly
2452 	 * or BR/EDR previously switched off.
2453 	 *
2454 	 * When trying to enable LE while it is already enabled, gracefully
2455 	 * send a positive response. Trying to disable it, however, will
2456 	 * result in rejection.
2457 	 */
2458 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2459 if (cp->val == 0x01)
2460 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2461
2462 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2463 MGMT_STATUS_REJECTED);
2464 }
2465
2466 hci_dev_lock(hdev);
2467
2468 val = !!cp->val;
2469 enabled = lmp_host_le_capable(hdev);
2470
2471 if (!hdev_is_powered(hdev) || val == enabled) {
2472 bool changed = false;
2473
2474 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2475 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2476 changed = true;
2477 }
2478
2479 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2480 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2481 changed = true;
2482 }
2483
2484 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2485 if (err < 0)
2486 goto unlock;
2487
2488 if (changed)
2489 err = new_settings(hdev, sk);
2490
2491 goto unlock;
2492 }
2493
2494 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2495 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2496 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2497 MGMT_STATUS_BUSY);
2498 goto unlock;
2499 }
2500
2501 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2502 if (!cmd)
2503 err = -ENOMEM;
2504 else
2505 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2506 set_le_complete);
2507
2508 if (err < 0) {
2509 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2510 MGMT_STATUS_FAILED);
2511
2512 if (cmd)
2513 mgmt_pending_remove(cmd);
2514 }
2515
2516 unlock:
2517 hci_dev_unlock(hdev);
2518 return err;
2519 }
2520
2521 /* This is a helper function to test for pending mgmt commands that can
2522 * cause CoD or EIR HCI commands. We can only allow one such pending
2523 * mgmt command at a time since otherwise we cannot easily track what
2524 * the current values are, will be, and based on that calculate if a new
2525 * HCI command needs to be sent and if yes with what value.
2526 */
2527 static bool pending_eir_or_class(struct hci_dev *hdev)
2528 {
2529 struct mgmt_pending_cmd *cmd;
2530
2531 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2532 switch (cmd->opcode) {
2533 case MGMT_OP_ADD_UUID:
2534 case MGMT_OP_REMOVE_UUID:
2535 case MGMT_OP_SET_DEV_CLASS:
2536 case MGMT_OP_SET_POWERED:
2537 return true;
2538 }
2539 }
2540
2541 return false;
2542 }
2543
2544 static const u8 bluetooth_base_uuid[] = {
2545 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2546 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2547 };
2548
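/* UUIDs are stored little-endian, so the first twelve bytes hold the
 * tail of the Bluetooth Base UUID (xxxxxxxx-0000-1000-8000-00805F9B34FB).
 * If they match, the UUID is really a 16- or 32-bit one; e.g. 0x180d
 * (Heart Rate) is stored as fb 34 9b 5f 80 00 00 80 00 10 00 00 0d 18 00 00.
 */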
2549 static u8 get_uuid_size(const u8 *uuid)
2550 {
2551 u32 val;
2552
2553 if (memcmp(uuid, bluetooth_base_uuid, 12))
2554 return 128;
2555
2556 val = get_unaligned_le32(&uuid[12]);
2557 if (val > 0xffff)
2558 return 32;
2559
2560 return 16;
2561 }
2562
2563 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2564 {
2565 struct mgmt_pending_cmd *cmd = data;
2566
2567 bt_dev_dbg(hdev, "err %d", err);
2568
2569 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2570 mgmt_status(err), hdev->dev_class, 3);
2571
2572 mgmt_pending_free(cmd);
2573 }
2574
2575 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2576 {
2577 int err;
2578
2579 err = hci_update_class_sync(hdev);
2580 if (err)
2581 return err;
2582
2583 return hci_update_eir_sync(hdev);
2584 }
2585
2586 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2587 {
2588 struct mgmt_cp_add_uuid *cp = data;
2589 struct mgmt_pending_cmd *cmd;
2590 struct bt_uuid *uuid;
2591 int err;
2592
2593 bt_dev_dbg(hdev, "sock %p", sk);
2594
2595 hci_dev_lock(hdev);
2596
2597 if (pending_eir_or_class(hdev)) {
2598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2599 MGMT_STATUS_BUSY);
2600 goto failed;
2601 }
2602
2603 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2604 if (!uuid) {
2605 err = -ENOMEM;
2606 goto failed;
2607 }
2608
2609 memcpy(uuid->uuid, cp->uuid, 16);
2610 uuid->svc_hint = cp->svc_hint;
2611 uuid->size = get_uuid_size(cp->uuid);
2612
2613 list_add_tail(&uuid->list, &hdev->uuids);
2614
2615 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2616 if (!cmd) {
2617 err = -ENOMEM;
2618 goto failed;
2619 }
2620
2621 	/* MGMT_OP_ADD_UUID doesn't require the adapter to be up and running,
2622 	 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2623 	 */
2624 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2625 mgmt_class_complete);
2626 if (err < 0) {
2627 mgmt_pending_free(cmd);
2628 goto failed;
2629 }
2630
2631 failed:
2632 hci_dev_unlock(hdev);
2633 return err;
2634 }
2635
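/* While HCI_SERVICE_CACHE is set, class and EIR updates are deferred
 * and flushed later by the service_cache delayed work, avoiding a
 * controller round-trip for every UUID change.
 */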
2636 static bool enable_service_cache(struct hci_dev *hdev)
2637 {
2638 if (!hdev_is_powered(hdev))
2639 return false;
2640
2641 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2642 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2643 CACHE_TIMEOUT);
2644 return true;
2645 }
2646
2647 return false;
2648 }
2649
2650 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2651 {
2652 int err;
2653
2654 err = hci_update_class_sync(hdev);
2655 if (err)
2656 return err;
2657
2658 return hci_update_eir_sync(hdev);
2659 }
2660
2661 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2662 u16 len)
2663 {
2664 struct mgmt_cp_remove_uuid *cp = data;
2665 struct mgmt_pending_cmd *cmd;
2666 struct bt_uuid *match, *tmp;
2667 static const u8 bt_uuid_any[] = {
2668 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2669 };
2670 int err, found;
2671
2672 bt_dev_dbg(hdev, "sock %p", sk);
2673
2674 hci_dev_lock(hdev);
2675
2676 if (pending_eir_or_class(hdev)) {
2677 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2678 MGMT_STATUS_BUSY);
2679 goto unlock;
2680 }
2681
2682 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2683 hci_uuids_clear(hdev);
2684
2685 if (enable_service_cache(hdev)) {
2686 err = mgmt_cmd_complete(sk, hdev->id,
2687 MGMT_OP_REMOVE_UUID,
2688 0, hdev->dev_class, 3);
2689 goto unlock;
2690 }
2691
2692 goto update_class;
2693 }
2694
2695 found = 0;
2696
2697 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2698 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2699 continue;
2700
2701 list_del(&match->list);
2702 kfree(match);
2703 found++;
2704 }
2705
2706 if (found == 0) {
2707 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2708 MGMT_STATUS_INVALID_PARAMS);
2709 goto unlock;
2710 }
2711
2712 update_class:
2713 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2714 if (!cmd) {
2715 err = -ENOMEM;
2716 goto unlock;
2717 }
2718
2719 	/* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up and running,
2720 	 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2721 	 */
2722 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2723 mgmt_class_complete);
2724 if (err < 0)
2725 mgmt_pending_free(cmd);
2726
2727 unlock:
2728 hci_dev_unlock(hdev);
2729 return err;
2730 }
2731
2732 static int set_class_sync(struct hci_dev *hdev, void *data)
2733 {
2734 int err = 0;
2735
2736 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2737 cancel_delayed_work_sync(&hdev->service_cache);
2738 err = hci_update_eir_sync(hdev);
2739 }
2740
2741 if (err)
2742 return err;
2743
2744 return hci_update_class_sync(hdev);
2745 }
2746
2747 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2748 u16 len)
2749 {
2750 struct mgmt_cp_set_dev_class *cp = data;
2751 struct mgmt_pending_cmd *cmd;
2752 int err;
2753
2754 bt_dev_dbg(hdev, "sock %p", sk);
2755
2756 if (!lmp_bredr_capable(hdev))
2757 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2758 MGMT_STATUS_NOT_SUPPORTED);
2759
2760 hci_dev_lock(hdev);
2761
2762 if (pending_eir_or_class(hdev)) {
2763 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2764 MGMT_STATUS_BUSY);
2765 goto unlock;
2766 }
2767
2768 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2769 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2770 MGMT_STATUS_INVALID_PARAMS);
2771 goto unlock;
2772 }
2773
2774 hdev->major_class = cp->major;
2775 hdev->minor_class = cp->minor;
2776
2777 if (!hdev_is_powered(hdev)) {
2778 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2779 hdev->dev_class, 3);
2780 goto unlock;
2781 }
2782
2783 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2784 if (!cmd) {
2785 err = -ENOMEM;
2786 goto unlock;
2787 }
2788
2789 	/* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up and
2790 	 * running, so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2791 	 */
2792 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2793 mgmt_class_complete);
2794 if (err < 0)
2795 mgmt_pending_free(cmd);
2796
2797 unlock:
2798 hci_dev_unlock(hdev);
2799 return err;
2800 }
2801
2802 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2803 u16 len)
2804 {
2805 struct mgmt_cp_load_link_keys *cp = data;
2806 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2807 sizeof(struct mgmt_link_key_info));
2808 u16 key_count, expected_len;
2809 bool changed;
2810 int i;
2811
2812 bt_dev_dbg(hdev, "sock %p", sk);
2813
2814 if (!lmp_bredr_capable(hdev))
2815 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2816 MGMT_STATUS_NOT_SUPPORTED);
2817
2818 key_count = __le16_to_cpu(cp->key_count);
2819 if (key_count > max_key_count) {
2820 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2821 key_count);
2822 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2823 MGMT_STATUS_INVALID_PARAMS);
2824 }
2825
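	/* struct_size() sizes the fixed header plus key_count trailing
	 * mgmt_link_key_info entries, saturating to SIZE_MAX on overflow.
	 */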
2826 expected_len = struct_size(cp, keys, key_count);
2827 if (expected_len != len) {
2828 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2829 expected_len, len);
2830 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2831 MGMT_STATUS_INVALID_PARAMS);
2832 }
2833
2834 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2835 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2836 MGMT_STATUS_INVALID_PARAMS);
2837
2838 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2839 key_count);
2840
2841 hci_dev_lock(hdev);
2842
2843 hci_link_keys_clear(hdev);
2844
2845 if (cp->debug_keys)
2846 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2847 else
2848 changed = hci_dev_test_and_clear_flag(hdev,
2849 HCI_KEEP_DEBUG_KEYS);
2850
2851 if (changed)
2852 new_settings(hdev, NULL);
2853
2854 for (i = 0; i < key_count; i++) {
2855 struct mgmt_link_key_info *key = &cp->keys[i];
2856
2857 if (hci_is_blocked_key(hdev,
2858 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2859 key->val)) {
2860 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2861 &key->addr.bdaddr);
2862 continue;
2863 }
2864
2865 if (key->addr.type != BDADDR_BREDR) {
2866 bt_dev_warn(hdev,
2867 "Invalid link address type %u for %pMR",
2868 key->addr.type, &key->addr.bdaddr);
2869 continue;
2870 }
2871
2872 if (key->type > 0x08) {
2873 bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
2874 key->type, &key->addr.bdaddr);
2875 continue;
2876 }
2877
2878 /* Always ignore debug keys and require a new pairing if
2879 * the user wants to use them.
2880 */
2881 if (key->type == HCI_LK_DEBUG_COMBINATION)
2882 continue;
2883
2884 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2885 key->type, key->pin_len, NULL);
2886 }
2887
2888 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2889
2890 hci_dev_unlock(hdev);
2891
2892 return 0;
2893 }
2894
2895 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2896 u8 addr_type, struct sock *skip_sk)
2897 {
2898 struct mgmt_ev_device_unpaired ev;
2899
2900 bacpy(&ev.addr.bdaddr, bdaddr);
2901 ev.addr.type = addr_type;
2902
2903 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2904 skip_sk);
2905 }
2906
2907 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2908 {
2909 struct mgmt_pending_cmd *cmd = data;
2910 struct mgmt_cp_unpair_device *cp = cmd->param;
2911
2912 if (!err)
2913 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2914
2915 cmd->cmd_complete(cmd, err);
2916 mgmt_pending_free(cmd);
2917 }
2918
2919 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2920 {
2921 struct mgmt_pending_cmd *cmd = data;
2922 struct mgmt_cp_unpair_device *cp = cmd->param;
2923 struct hci_conn *conn;
2924
2925 if (cp->addr.type == BDADDR_BREDR)
2926 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2927 &cp->addr.bdaddr);
2928 else
2929 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2930 le_addr_type(cp->addr.type));
2931
2932 if (!conn)
2933 return 0;
2934
2935 	/* Disregard any possible error since hci_abort_conn_sync will
2936 	 * clean up the connection regardless of the error.
2937 	 */
2938 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2939
2940 return 0;
2941 }
2942
2943 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2944 u16 len)
2945 {
2946 struct mgmt_cp_unpair_device *cp = data;
2947 struct mgmt_rp_unpair_device rp;
2948 struct hci_conn_params *params;
2949 struct mgmt_pending_cmd *cmd;
2950 struct hci_conn *conn;
2951 u8 addr_type;
2952 int err;
2953
2954 memset(&rp, 0, sizeof(rp));
2955 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2956 rp.addr.type = cp->addr.type;
2957
2958 if (!bdaddr_type_is_valid(cp->addr.type))
2959 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2960 MGMT_STATUS_INVALID_PARAMS,
2961 &rp, sizeof(rp));
2962
2963 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2964 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2965 MGMT_STATUS_INVALID_PARAMS,
2966 &rp, sizeof(rp));
2967
2968 hci_dev_lock(hdev);
2969
2970 if (!hdev_is_powered(hdev)) {
2971 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2972 MGMT_STATUS_NOT_POWERED, &rp,
2973 sizeof(rp));
2974 goto unlock;
2975 }
2976
2977 if (cp->addr.type == BDADDR_BREDR) {
2978 /* If disconnection is requested, then look up the
2979 * connection. If the remote device is connected, it
2980 * will be later used to terminate the link.
2981 *
2982 * Setting it to NULL explicitly will cause no
2983 * termination of the link.
2984 */
2985 if (cp->disconnect)
2986 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2987 &cp->addr.bdaddr);
2988 else
2989 conn = NULL;
2990
2991 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2992 if (err < 0) {
2993 err = mgmt_cmd_complete(sk, hdev->id,
2994 MGMT_OP_UNPAIR_DEVICE,
2995 MGMT_STATUS_NOT_PAIRED, &rp,
2996 sizeof(rp));
2997 goto unlock;
2998 }
2999
3000 goto done;
3001 }
3002
3003 /* LE address type */
3004 addr_type = le_addr_type(cp->addr.type);
3005
3006 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3007 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3008 if (err < 0) {
3009 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3010 MGMT_STATUS_NOT_PAIRED, &rp,
3011 sizeof(rp));
3012 goto unlock;
3013 }
3014
3015 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3016 if (!conn) {
3017 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3018 goto done;
3019 }
3020
3021
3022 	/* Defer clearing the connection parameters until the connection
3023 	 * closes, to give a chance of keeping them if a re-pairing happens.
3024 	 */
3025 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3026
3027 /* Disable auto-connection parameters if present */
3028 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3029 if (params) {
3030 if (params->explicit_connect)
3031 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3032 else
3033 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3034 }
3035
3036 /* If disconnection is not requested, then clear the connection
3037 * variable so that the link is not terminated.
3038 */
3039 if (!cp->disconnect)
3040 conn = NULL;
3041
3042 done:
3043 /* If the connection variable is set, then termination of the
3044 * link is requested.
3045 */
3046 if (!conn) {
3047 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3048 &rp, sizeof(rp));
3049 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3050 goto unlock;
3051 }
3052
3053 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3054 sizeof(*cp));
3055 if (!cmd) {
3056 err = -ENOMEM;
3057 goto unlock;
3058 }
3059
3060 cmd->cmd_complete = addr_cmd_complete;
3061
3062 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3063 unpair_device_complete);
3064 if (err < 0)
3065 mgmt_pending_free(cmd);
3066
3067 unlock:
3068 hci_dev_unlock(hdev);
3069 return err;
3070 }
3071
3072 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3073 {
3074 struct mgmt_pending_cmd *cmd = data;
3075
3076 cmd->cmd_complete(cmd, mgmt_status(err));
3077 mgmt_pending_free(cmd);
3078 }
3079
3080 static int disconnect_sync(struct hci_dev *hdev, void *data)
3081 {
3082 struct mgmt_pending_cmd *cmd = data;
3083 struct mgmt_cp_disconnect *cp = cmd->param;
3084 struct hci_conn *conn;
3085
3086 if (cp->addr.type == BDADDR_BREDR)
3087 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3088 &cp->addr.bdaddr);
3089 else
3090 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3091 le_addr_type(cp->addr.type));
3092
3093 if (!conn)
3094 return -ENOTCONN;
3095
3096 	/* Disregard any possible error since hci_abort_conn_sync will
3097 	 * clean up the connection regardless of the error.
3098 	 */
3099 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3100
3101 return 0;
3102 }
3103
3104 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3105 u16 len)
3106 {
3107 struct mgmt_cp_disconnect *cp = data;
3108 struct mgmt_rp_disconnect rp;
3109 struct mgmt_pending_cmd *cmd;
3110 int err;
3111
3112 bt_dev_dbg(hdev, "sock %p", sk);
3113
3114 memset(&rp, 0, sizeof(rp));
3115 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3116 rp.addr.type = cp->addr.type;
3117
3118 if (!bdaddr_type_is_valid(cp->addr.type))
3119 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3120 MGMT_STATUS_INVALID_PARAMS,
3121 &rp, sizeof(rp));
3122
3123 hci_dev_lock(hdev);
3124
3125 if (!test_bit(HCI_UP, &hdev->flags)) {
3126 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3127 MGMT_STATUS_NOT_POWERED, &rp,
3128 sizeof(rp));
3129 goto failed;
3130 }
3131
3132 cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3133 if (!cmd) {
3134 err = -ENOMEM;
3135 goto failed;
3136 }
3137
3138 cmd->cmd_complete = generic_cmd_complete;
3139
3140 err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3141 disconnect_complete);
3142 if (err < 0)
3143 mgmt_pending_free(cmd);
3144
3145 failed:
3146 hci_dev_unlock(hdev);
3147 return err;
3148 }
3149
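/* Convert an HCI link type and address type pair into the single
 * address type value used on the management interface.
 */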
3150 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3151 {
3152 switch (link_type) {
3153 case ISO_LINK:
3154 case LE_LINK:
3155 switch (addr_type) {
3156 case ADDR_LE_DEV_PUBLIC:
3157 return BDADDR_LE_PUBLIC;
3158
3159 default:
3160 /* Fallback to LE Random address type */
3161 return BDADDR_LE_RANDOM;
3162 }
3163
3164 default:
3165 /* Fallback to BR/EDR type */
3166 return BDADDR_BREDR;
3167 }
3168 }
3169
3170 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3171 u16 data_len)
3172 {
3173 struct mgmt_rp_get_connections *rp;
3174 struct hci_conn *c;
3175 int err;
3176 u16 i;
3177
3178 bt_dev_dbg(hdev, "sock %p", sk);
3179
3180 hci_dev_lock(hdev);
3181
3182 if (!hdev_is_powered(hdev)) {
3183 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3184 MGMT_STATUS_NOT_POWERED);
3185 goto unlock;
3186 }
3187
3188 i = 0;
3189 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3190 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3191 i++;
3192 }
3193
3194 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3195 if (!rp) {
3196 err = -ENOMEM;
3197 goto unlock;
3198 }
3199
3200 i = 0;
3201 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3202 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3203 continue;
3204 bacpy(&rp->addr[i].bdaddr, &c->dst);
3205 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3206 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3207 continue;
3208 i++;
3209 }
3210
3211 rp->conn_count = cpu_to_le16(i);
3212
3213 /* Recalculate length in case of filtered SCO connections, etc */
3214 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3215 struct_size(rp, addr, i));
3216
3217 kfree(rp);
3218
3219 unlock:
3220 hci_dev_unlock(hdev);
3221 return err;
3222 }
3223
3224 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3225 struct mgmt_cp_pin_code_neg_reply *cp)
3226 {
3227 struct mgmt_pending_cmd *cmd;
3228 int err;
3229
3230 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3231 sizeof(*cp));
3232 if (!cmd)
3233 return -ENOMEM;
3234
3235 cmd->cmd_complete = addr_cmd_complete;
3236
3237 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3238 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3239 if (err < 0)
3240 mgmt_pending_remove(cmd);
3241
3242 return err;
3243 }
3244
3245 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3246 u16 len)
3247 {
3248 struct hci_conn *conn;
3249 struct mgmt_cp_pin_code_reply *cp = data;
3250 struct hci_cp_pin_code_reply reply;
3251 struct mgmt_pending_cmd *cmd;
3252 int err;
3253
3254 bt_dev_dbg(hdev, "sock %p", sk);
3255
3256 hci_dev_lock(hdev);
3257
3258 if (!hdev_is_powered(hdev)) {
3259 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3260 MGMT_STATUS_NOT_POWERED);
3261 goto failed;
3262 }
3263
3264 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3265 if (!conn) {
3266 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3267 MGMT_STATUS_NOT_CONNECTED);
3268 goto failed;
3269 }
3270
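	/* High security with legacy pairing requires a full 16-digit PIN
	 * code, so anything shorter gets a negative reply.
	 */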
3271 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3272 struct mgmt_cp_pin_code_neg_reply ncp;
3273
3274 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3275
3276 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3277
3278 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3279 if (err >= 0)
3280 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3281 MGMT_STATUS_INVALID_PARAMS);
3282
3283 goto failed;
3284 }
3285
3286 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3287 if (!cmd) {
3288 err = -ENOMEM;
3289 goto failed;
3290 }
3291
3292 cmd->cmd_complete = addr_cmd_complete;
3293
3294 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3295 reply.pin_len = cp->pin_len;
3296 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3297
3298 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3299 if (err < 0)
3300 mgmt_pending_remove(cmd);
3301
3302 failed:
3303 hci_dev_unlock(hdev);
3304 return err;
3305 }
3306
3307 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3308 u16 len)
3309 {
3310 struct mgmt_cp_set_io_capability *cp = data;
3311
3312 bt_dev_dbg(hdev, "sock %p", sk);
3313
3314 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3315 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3316 MGMT_STATUS_INVALID_PARAMS);
3317
3318 hci_dev_lock(hdev);
3319
3320 hdev->io_capability = cp->io_capability;
3321
3322 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3323
3324 hci_dev_unlock(hdev);
3325
3326 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3327 NULL, 0);
3328 }
3329
3330 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3331 {
3332 struct hci_dev *hdev = conn->hdev;
3333 struct mgmt_pending_cmd *cmd;
3334
3335 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3336 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3337 continue;
3338
3339 if (cmd->user_data != conn)
3340 continue;
3341
3342 return cmd;
3343 }
3344
3345 return NULL;
3346 }
3347
3348 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3349 {
3350 struct mgmt_rp_pair_device rp;
3351 struct hci_conn *conn = cmd->user_data;
3352 int err;
3353
3354 bacpy(&rp.addr.bdaddr, &conn->dst);
3355 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3356
3357 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3358 status, &rp, sizeof(rp));
3359
3360 /* So we don't get further callbacks for this connection */
3361 conn->connect_cfm_cb = NULL;
3362 conn->security_cfm_cb = NULL;
3363 conn->disconn_cfm_cb = NULL;
3364
3365 hci_conn_drop(conn);
3366
3367 /* The device is paired so there is no need to remove
3368 * its connection parameters anymore.
3369 */
3370 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3371
3372 hci_conn_put(conn);
3373
3374 return err;
3375 }
3376
3377 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3378 {
3379 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3380 struct mgmt_pending_cmd *cmd;
3381
3382 cmd = find_pairing(conn);
3383 if (cmd) {
3384 cmd->cmd_complete(cmd, status);
3385 mgmt_pending_remove(cmd);
3386 }
3387 }
3388
3389 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3390 {
3391 struct mgmt_pending_cmd *cmd;
3392
3393 BT_DBG("status %u", status);
3394
3395 cmd = find_pairing(conn);
3396 if (!cmd) {
3397 BT_DBG("Unable to find a pending command");
3398 return;
3399 }
3400
3401 cmd->cmd_complete(cmd, mgmt_status(status));
3402 mgmt_pending_remove(cmd);
3403 }
3404
3405 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3406 {
3407 struct mgmt_pending_cmd *cmd;
3408
3409 BT_DBG("status %u", status);
3410
3411 if (!status)
3412 return;
3413
3414 cmd = find_pairing(conn);
3415 if (!cmd) {
3416 BT_DBG("Unable to find a pending command");
3417 return;
3418 }
3419
3420 cmd->cmd_complete(cmd, mgmt_status(status));
3421 mgmt_pending_remove(cmd);
3422 }
3423
3424 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3425 u16 len)
3426 {
3427 struct mgmt_cp_pair_device *cp = data;
3428 struct mgmt_rp_pair_device rp;
3429 struct mgmt_pending_cmd *cmd;
3430 u8 sec_level, auth_type;
3431 struct hci_conn *conn;
3432 int err;
3433
3434 bt_dev_dbg(hdev, "sock %p", sk);
3435
3436 memset(&rp, 0, sizeof(rp));
3437 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3438 rp.addr.type = cp->addr.type;
3439
3440 if (!bdaddr_type_is_valid(cp->addr.type))
3441 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3442 MGMT_STATUS_INVALID_PARAMS,
3443 &rp, sizeof(rp));
3444
3445 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3446 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3447 MGMT_STATUS_INVALID_PARAMS,
3448 &rp, sizeof(rp));
3449
3450 hci_dev_lock(hdev);
3451
3452 if (!hdev_is_powered(hdev)) {
3453 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3454 MGMT_STATUS_NOT_POWERED, &rp,
3455 sizeof(rp));
3456 goto unlock;
3457 }
3458
3459 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3460 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3461 MGMT_STATUS_ALREADY_PAIRED, &rp,
3462 sizeof(rp));
3463 goto unlock;
3464 }
3465
3466 sec_level = BT_SECURITY_MEDIUM;
3467 auth_type = HCI_AT_DEDICATED_BONDING;
3468
3469 if (cp->addr.type == BDADDR_BREDR) {
3470 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3471 auth_type, CONN_REASON_PAIR_DEVICE);
3472 } else {
3473 u8 addr_type = le_addr_type(cp->addr.type);
3474 struct hci_conn_params *p;
3475
3476 /* When pairing a new device, it is expected to remember
3477 * this device for future connections. Adding the connection
3478 * parameter information ahead of time allows tracking
3479 * of the peripheral preferred values and will speed up any
3480 * further connection establishment.
3481 *
3482 * If connection parameters already exist, then they
3483 * will be kept and this function does nothing.
3484 */
3485 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3486 if (!p) {
3487 err = -EIO;
3488 goto unlock;
3489 }
3490
3491 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3492 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3493
3494 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3495 sec_level, HCI_LE_CONN_TIMEOUT,
3496 CONN_REASON_PAIR_DEVICE);
3497 }
3498
3499 if (IS_ERR(conn)) {
3500 int status;
3501
3502 if (PTR_ERR(conn) == -EBUSY)
3503 status = MGMT_STATUS_BUSY;
3504 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3505 status = MGMT_STATUS_NOT_SUPPORTED;
3506 else if (PTR_ERR(conn) == -ECONNREFUSED)
3507 status = MGMT_STATUS_REJECTED;
3508 else
3509 status = MGMT_STATUS_CONNECT_FAILED;
3510
3511 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3512 status, &rp, sizeof(rp));
3513 goto unlock;
3514 }
3515
3516 if (conn->connect_cfm_cb) {
3517 hci_conn_drop(conn);
3518 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3519 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3520 goto unlock;
3521 }
3522
3523 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3524 if (!cmd) {
3525 err = -ENOMEM;
3526 hci_conn_drop(conn);
3527 goto unlock;
3528 }
3529
3530 cmd->cmd_complete = pairing_complete;
3531
3532 /* For LE, just connecting isn't a proof that the pairing finished */
3533 if (cp->addr.type == BDADDR_BREDR) {
3534 conn->connect_cfm_cb = pairing_complete_cb;
3535 conn->security_cfm_cb = pairing_complete_cb;
3536 conn->disconn_cfm_cb = pairing_complete_cb;
3537 } else {
3538 conn->connect_cfm_cb = le_pairing_complete_cb;
3539 conn->security_cfm_cb = le_pairing_complete_cb;
3540 conn->disconn_cfm_cb = le_pairing_complete_cb;
3541 }
3542
3543 conn->io_capability = cp->io_cap;
3544 cmd->user_data = hci_conn_get(conn);
3545
3546 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3547 hci_conn_security(conn, sec_level, auth_type, true)) {
3548 cmd->cmd_complete(cmd, 0);
3549 mgmt_pending_remove(cmd);
3550 }
3551
3552 err = 0;
3553
3554 unlock:
3555 hci_dev_unlock(hdev);
3556 return err;
3557 }
3558
3559 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3560 u16 len)
3561 {
3562 struct mgmt_addr_info *addr = data;
3563 struct mgmt_pending_cmd *cmd;
3564 struct hci_conn *conn;
3565 int err;
3566
3567 bt_dev_dbg(hdev, "sock %p", sk);
3568
3569 hci_dev_lock(hdev);
3570
3571 if (!hdev_is_powered(hdev)) {
3572 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3573 MGMT_STATUS_NOT_POWERED);
3574 goto unlock;
3575 }
3576
3577 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3578 if (!cmd) {
3579 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3580 MGMT_STATUS_INVALID_PARAMS);
3581 goto unlock;
3582 }
3583
3584 conn = cmd->user_data;
3585
3586 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3587 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3588 MGMT_STATUS_INVALID_PARAMS);
3589 goto unlock;
3590 }
3591
3592 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3593 mgmt_pending_remove(cmd);
3594
3595 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3596 addr, sizeof(*addr));
3597
3598 	/* Since the user doesn't want to proceed with the connection, abort
3599 	 * any ongoing pairing and then terminate the link if it was created
3600 	 * because of the pair device action.
3601 	 */
3602 if (addr->type == BDADDR_BREDR)
3603 hci_remove_link_key(hdev, &addr->bdaddr);
3604 else
3605 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3606 le_addr_type(addr->type));
3607
3608 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3609 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3610
3611 unlock:
3612 hci_dev_unlock(hdev);
3613 return err;
3614 }
3615
3616 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3617 struct mgmt_addr_info *addr, u16 mgmt_op,
3618 u16 hci_op, __le32 passkey)
3619 {
3620 struct mgmt_pending_cmd *cmd;
3621 struct hci_conn *conn;
3622 int err;
3623
3624 hci_dev_lock(hdev);
3625
3626 if (!hdev_is_powered(hdev)) {
3627 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3628 MGMT_STATUS_NOT_POWERED, addr,
3629 sizeof(*addr));
3630 goto done;
3631 }
3632
3633 if (addr->type == BDADDR_BREDR)
3634 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3635 else
3636 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3637 le_addr_type(addr->type));
3638
3639 if (!conn) {
3640 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3641 MGMT_STATUS_NOT_CONNECTED, addr,
3642 sizeof(*addr));
3643 goto done;
3644 }
3645
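	/* For LE the reply goes through SMP rather than an HCI command */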
3646 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3647 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3648 if (!err)
3649 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3650 MGMT_STATUS_SUCCESS, addr,
3651 sizeof(*addr));
3652 else
3653 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3654 MGMT_STATUS_FAILED, addr,
3655 sizeof(*addr));
3656
3657 goto done;
3658 }
3659
3660 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3661 if (!cmd) {
3662 err = -ENOMEM;
3663 goto done;
3664 }
3665
3666 cmd->cmd_complete = addr_cmd_complete;
3667
3668 /* Continue with pairing via HCI */
3669 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3670 struct hci_cp_user_passkey_reply cp;
3671
3672 bacpy(&cp.bdaddr, &addr->bdaddr);
3673 cp.passkey = passkey;
3674 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3675 } else
3676 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3677 &addr->bdaddr);
3678
3679 if (err < 0)
3680 mgmt_pending_remove(cmd);
3681
3682 done:
3683 hci_dev_unlock(hdev);
3684 return err;
3685 }
3686
3687 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3688 void *data, u16 len)
3689 {
3690 struct mgmt_cp_pin_code_neg_reply *cp = data;
3691
3692 bt_dev_dbg(hdev, "sock %p", sk);
3693
3694 return user_pairing_resp(sk, hdev, &cp->addr,
3695 MGMT_OP_PIN_CODE_NEG_REPLY,
3696 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3697 }
3698
3699 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3700 u16 len)
3701 {
3702 struct mgmt_cp_user_confirm_reply *cp = data;
3703
3704 bt_dev_dbg(hdev, "sock %p", sk);
3705
3706 if (len != sizeof(*cp))
3707 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3708 MGMT_STATUS_INVALID_PARAMS);
3709
3710 return user_pairing_resp(sk, hdev, &cp->addr,
3711 MGMT_OP_USER_CONFIRM_REPLY,
3712 HCI_OP_USER_CONFIRM_REPLY, 0);
3713 }
3714
3715 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3716 void *data, u16 len)
3717 {
3718 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3719
3720 bt_dev_dbg(hdev, "sock %p", sk);
3721
3722 return user_pairing_resp(sk, hdev, &cp->addr,
3723 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3724 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3725 }
3726
3727 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3728 u16 len)
3729 {
3730 struct mgmt_cp_user_passkey_reply *cp = data;
3731
3732 bt_dev_dbg(hdev, "sock %p", sk);
3733
3734 return user_pairing_resp(sk, hdev, &cp->addr,
3735 MGMT_OP_USER_PASSKEY_REPLY,
3736 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3737 }
3738
3739 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3740 void *data, u16 len)
3741 {
3742 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3743
3744 bt_dev_dbg(hdev, "sock %p", sk);
3745
3746 return user_pairing_resp(sk, hdev, &cp->addr,
3747 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3748 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3749 }
3750
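/* Expire the current advertising instance when a change (local name or
 * appearance) invalidates data it carries, and schedule the next one.
 */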
3751 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3752 {
3753 struct adv_info *adv_instance;
3754
3755 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3756 if (!adv_instance)
3757 return 0;
3758
3759 /* stop if current instance doesn't need to be changed */
3760 if (!(adv_instance->flags & flags))
3761 return 0;
3762
3763 cancel_adv_timeout(hdev);
3764
3765 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3766 if (!adv_instance)
3767 return 0;
3768
3769 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3770
3771 return 0;
3772 }
3773
3774 static int name_changed_sync(struct hci_dev *hdev, void *data)
3775 {
3776 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3777 }
3778
3779 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3780 {
3781 struct mgmt_pending_cmd *cmd = data;
3782 struct mgmt_cp_set_local_name *cp = cmd->param;
3783 u8 status = mgmt_status(err);
3784
3785 bt_dev_dbg(hdev, "err %d", err);
3786
3787 if (err == -ECANCELED ||
3788 cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3789 return;
3790
3791 if (status) {
3792 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3793 status);
3794 } else {
3795 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3796 cp, sizeof(*cp));
3797
3798 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3799 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3800 }
3801
3802 mgmt_pending_remove(cmd);
3803 }
3804
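/* Push the updated name to the controller: refresh the BR/EDR name and
 * EIR data when supported, and the scan response data when advertising
 * is active.
 */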
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}

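/* Set Local Name command handler. Completes immediately when nothing
 * changed or when the controller is powered off (only the stored
 * copies are updated); otherwise the name update is queued for the
 * controller.
 */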
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}

static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}

static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}

int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}

static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}

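/* Translate the selected MGMT PHY bits into an HCI LE Set Default PHY
 * command. An empty TX or RX selection maps to the corresponding
 * "no preference" bit in all_phys (0x01 for TX, 0x02 for RX).
 */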
static int set_default_phy_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
	struct hci_cp_le_set_default_phy cp_phy;
	u32 selected_phys = __le32_to_cpu(cp->selected_phys);

	memset(&cp_phy, 0, sizeof(cp_phy));

	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				  sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);

	return 0;
}

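/* Set PHY Configuration command handler. BR/EDR PHY selections are
 * applied locally by adjusting the ACL packet type mask (note that the
 * EDR bits use inverted logic: a set packet type bit disables that
 * packet type), while LE PHY changes are forwarded to the controller.
 */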
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

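/* Set Blocked Keys command handler. Replaces the current list of
 * blocked keys with the list supplied by the caller; the entries are
 * kept on hdev->blocked_keys for lookup by the key handling code.
 */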
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}

static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

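/* Read Controller Capabilities: build an EIR-encoded list of security
 * capabilities (public key validation, encryption key size enforcement,
 * maximum encryption key sizes and, when the controller can report it,
 * the LE TX power range).
 */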
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to
	 * fetch them from the controller.
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}

#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};

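/* Read Experimental Features: report each available experimental
 * feature UUID together with its current flags; BIT(0) set in the
 * flags means the feature is enabled.
 */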
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}

static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	/* Do we need to be atomic with the conn_flags? */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}

static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}

#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}

/* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}

#ifdef CONFIG_BT_FEATURE_DEBUG
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif

static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}

static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when the controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}

static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}

static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}

static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}

#ifdef CONFIG_BT_LE
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif

static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};

static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	size_t i = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (i = 0; exp_features[i].uuid; i++) {
		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
			return exp_features[i].set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}

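/* Return the connection flags that can actually be supported for the
 * given connection parameters entry.
 */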
static u32 get_params_flags(struct hci_dev *hdev,
			    struct hci_conn_params *params)
{
	u32 flags = hdev->conn_flags;

	/* Devices using RPAs can only be programmed into the accept list if
	 * LL Privacy has been enabled; otherwise they cannot set
	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
	 */
	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;

	return flags;
}

static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}

static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}

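/* Set Device Flags command handler. Validates the requested flags
 * against the supported mask and stores them in the matching BR/EDR
 * accept list entry or LE connection parameters entry.
 */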
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* We should take hci_dev_lock() earlier; conn_flags can change */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}

static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev;

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}

static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}

static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	return hci_add_adv_monitor(hdev, monitor);
}

static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}

static void parse_adv_monitor_rssi(struct adv_monitor *m,
				   struct mgmt_adv_rssi_thresholds *rssi)
{
	if (rssi) {
		m->rssi.low_threshold = rssi->low_threshold;
		m->rssi.low_threshold_timeout =
		    __le16_to_cpu(rssi->low_threshold_timeout);
		m->rssi.high_threshold = rssi->high_threshold;
		m->rssi.high_threshold_timeout =
		    __le16_to_cpu(rssi->high_threshold_timeout);
		m->rssi.sampling_period = rssi->sampling_period;
	} else {
		/* Default values. These numbers are the least constraining
		 * parameters for the MSFT API to work, so it behaves as if
		 * there were no RSSI parameters to consider. May need to be
		 * changed if other APIs are to be supported.
		 */
		m->rssi.low_threshold = -127;
		m->rssi.low_threshold_timeout = 60;
		m->rssi.high_threshold = -127;
		m->rssi.high_threshold_timeout = 0;
		m->rssi.sampling_period = 0;
	}
}

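/* Copy the advertising patterns from the management command into the
 * monitor, validating that each pattern fits within the maximum
 * extended advertising data length.
 */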
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	u8 offset = 0, length = 0;
	struct adv_pattern *p = NULL;
	int i;

	for (i = 0; i < pattern_count; i++) {
		offset = patterns[i].offset;
		length = patterns[i].length;
		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
		    length > HCI_MAX_EXT_AD_LENGTH ||
		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = patterns[i].ad_type;
		p->offset = patterns[i].offset;
		p->length = patterns[i].length;
		memcpy(p->value, patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}

static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}

static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}

static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp;

	if (status == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
		return;

	hci_dev_lock(hdev);

	cp = cmd->param;

	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp;
	u16 handle;

	if (cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
		return -ECANCELED;

	cp = cmd->param;
	handle = __le16_to_cpu(cp->monitor_handle);

	if (!handle)
		return hci_remove_all_adv_monitor(hdev);

	return hci_remove_single_adv_monitor(hdev, handle);
}

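/* Remove Advertisement Monitor command handler. A monitor handle of
 * zero removes all registered monitors.
 */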
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}

static void read_local_oob_data_complete(struct hci_dev *hdev, void *data,
					 int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}

static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;

	if (bredr_sc_enabled(hdev))
		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
	else
		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);

	if (IS_ERR(cmd->skb))
		return PTR_ERR(cmd->skb);
	else
		return 0;
}

static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

add_remote_oob_data(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)5666 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5667 void *data, u16 len)
5668 {
5669 struct mgmt_addr_info *addr = data;
5670 int err;
5671
5672 bt_dev_dbg(hdev, "sock %p", sk);
5673
5674 if (!bdaddr_type_is_valid(addr->type))
5675 return mgmt_cmd_complete(sk, hdev->id,
5676 MGMT_OP_ADD_REMOTE_OOB_DATA,
5677 MGMT_STATUS_INVALID_PARAMS,
5678 addr, sizeof(*addr));
5679
5680 hci_dev_lock(hdev);
5681
5682 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5683 struct mgmt_cp_add_remote_oob_data *cp = data;
5684 u8 status;
5685
5686 if (cp->addr.type != BDADDR_BREDR) {
5687 err = mgmt_cmd_complete(sk, hdev->id,
5688 MGMT_OP_ADD_REMOTE_OOB_DATA,
5689 MGMT_STATUS_INVALID_PARAMS,
5690 &cp->addr, sizeof(cp->addr));
5691 goto unlock;
5692 }
5693
5694 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5695 cp->addr.type, cp->hash,
5696 cp->rand, NULL, NULL);
5697 if (err < 0)
5698 status = MGMT_STATUS_FAILED;
5699 else
5700 status = MGMT_STATUS_SUCCESS;
5701
5702 err = mgmt_cmd_complete(sk, hdev->id,
5703 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5704 &cp->addr, sizeof(cp->addr));
5705 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5706 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5707 u8 *rand192, *hash192, *rand256, *hash256;
5708 u8 status;
5709
5710 if (bdaddr_type_is_le(cp->addr.type)) {
5711 /* Enforce zero-valued 192-bit parameters as
5712 * long as legacy SMP OOB isn't implemented.
5713 */
5714 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5715 memcmp(cp->hash192, ZERO_KEY, 16)) {
5716 err = mgmt_cmd_complete(sk, hdev->id,
5717 MGMT_OP_ADD_REMOTE_OOB_DATA,
5718 MGMT_STATUS_INVALID_PARAMS,
5719 addr, sizeof(*addr));
5720 goto unlock;
5721 }
5722
5723 rand192 = NULL;
5724 hash192 = NULL;
5725 } else {
5726 /* In case one of the P-192 values is set to zero,
5727 * then just disable OOB data for P-192.
5728 */
5729 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5730 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5731 rand192 = NULL;
5732 hash192 = NULL;
5733 } else {
5734 rand192 = cp->rand192;
5735 hash192 = cp->hash192;
5736 }
5737 }
5738
5739 /* In case one of the P-256 values is set to zero, then just
5740 * disable OOB data for P-256.
5741 */
5742 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5743 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5744 rand256 = NULL;
5745 hash256 = NULL;
5746 } else {
5747 rand256 = cp->rand256;
5748 hash256 = cp->hash256;
5749 }
5750
5751 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5752 cp->addr.type, hash192, rand192,
5753 hash256, rand256);
5754 if (err < 0)
5755 status = MGMT_STATUS_FAILED;
5756 else
5757 status = MGMT_STATUS_SUCCESS;
5758
5759 err = mgmt_cmd_complete(sk, hdev->id,
5760 MGMT_OP_ADD_REMOTE_OOB_DATA,
5761 status, &cp->addr, sizeof(cp->addr));
5762 } else {
5763 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5764 len);
5765 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5766 MGMT_STATUS_INVALID_PARAMS);
5767 }
5768
5769 unlock:
5770 hci_dev_unlock(hdev);
5771 return err;
5772 }
5773
remove_remote_oob_data(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)5774 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5775 void *data, u16 len)
5776 {
5777 struct mgmt_cp_remove_remote_oob_data *cp = data;
5778 u8 status;
5779 int err;
5780
5781 bt_dev_dbg(hdev, "sock %p", sk);
5782
5783 if (cp->addr.type != BDADDR_BREDR)
5784 return mgmt_cmd_complete(sk, hdev->id,
5785 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5786 MGMT_STATUS_INVALID_PARAMS,
5787 &cp->addr, sizeof(cp->addr));
5788
5789 hci_dev_lock(hdev);
5790
5791 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5792 hci_remote_oob_data_clear(hdev);
5793 status = MGMT_STATUS_SUCCESS;
5794 goto done;
5795 }
5796
5797 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5798 if (err < 0)
5799 status = MGMT_STATUS_INVALID_PARAMS;
5800 else
5801 status = MGMT_STATUS_SUCCESS;
5802
5803 done:
5804 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5805 status, &cp->addr, sizeof(cp->addr));
5806
5807 hci_dev_unlock(hdev);
5808 return err;
5809 }
5810
mgmt_start_discovery_complete(struct hci_dev * hdev,u8 status)5811 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5812 {
5813 struct mgmt_pending_cmd *cmd;
5814
5815 bt_dev_dbg(hdev, "status %u", status);
5816
5817 hci_dev_lock(hdev);
5818
5819 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5820 if (!cmd)
5821 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5822
5823 if (!cmd)
5824 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5825
5826 if (cmd) {
5827 cmd->cmd_complete(cmd, mgmt_status(status));
5828 mgmt_pending_remove(cmd);
5829 }
5830
5831 hci_dev_unlock(hdev);
5832 }
5833
discovery_type_is_valid(struct hci_dev * hdev,uint8_t type,uint8_t * mgmt_status)5834 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5835 uint8_t *mgmt_status)
5836 {
5837 switch (type) {
5838 case DISCOV_TYPE_LE:
5839 *mgmt_status = mgmt_le_support(hdev);
5840 if (*mgmt_status)
5841 return false;
5842 break;
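	/* Interleaved discovery needs both LE and BR/EDR support, so
	 * check LE first and fall through to the BR/EDR check.
	 */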
	case DISCOV_TYPE_INTERLEAVED:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		fallthrough;
	case DISCOV_TYPE_BREDR:
		*mgmt_status = mgmt_bredr_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	default:
		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}

static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
					    DISCOVERY_FINDING);
}

static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}

static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}

static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}

static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

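	/* Each UUID entry in the command is a full 128-bit (16 byte)
	 * value, so the total length must match exactly.
	 */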
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}

static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_FAILED;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}

static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

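	/* Valid sources are 0x0000 (Device ID disabled), 0x0001 (vendor ID
	 * assigned by the Bluetooth SIG) and 0x0002 (vendor ID assigned by
	 * the USB Implementer's Forum).
	 */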
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}

static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
	else
		bt_dev_dbg(hdev, "status %d", err);
}

static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}

static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

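	/* A value of 0x02 requests connectable advertising, 0x01 plain
	 * advertising and 0x00 disables it.
	 */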
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}

static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

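	/* Interval and window are in 0.625 ms units; the allowed range is
	 * 0x0004 (2.5 ms) to 0x4000 (10.24 s), and the window must not be
	 * larger than the interval.
	 */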
	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}

static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}

static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}

static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}

static int set_bredr_sync(struct hci_dev *hdev, void *data)
{
	int status;

	status = hci_write_fast_connectable_sync(hdev, false);

	if (!status)
		status = hci_update_scan_sync(hdev);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	if (!status)
		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);

	return status;
}

static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as their identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restriction applies when Secure Connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when Secure Connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}

static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}

static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;

		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->initiator != 0x00 && key->initiator != 0x01)
		return false;

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (!ltk_is_valid(key)) {
			bt_dev_warn(hdev, "Invalid LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

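		/* Map the mgmt key type onto the SMP key type; P-256 debug
		 * keys deliberately fall through to the default case and
		 * are never loaded.
		 */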
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16 handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change, thus we don't need to
	 * query for it once the value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}

static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid the client trying to guess when to poll again for
	 * information, calculate the conn info age as a random value
	 * between the min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or
	 * were never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}

static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	memset(&hci_cp, 0, sizeof(hci_cp));
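	/* hci_cp is zeroed, so this first read fetches the local clock
	 * (which = 0x00); the piconet clock is read below.
	 */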
7450 hci_read_clock_sync(hdev, &hci_cp);
7451
7452 /* Make sure connection still exists */
7453 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7454 if (!conn || conn->state != BT_CONNECTED)
7455 return MGMT_STATUS_NOT_CONNECTED;
7456
7457 cmd->user_data = conn;
7458 hci_cp.handle = cpu_to_le16(conn->handle);
7459 hci_cp.which = 0x01; /* Piconet clock */
7460
7461 return hci_read_clock_sync(hdev, &hci_cp);
7462 }
7463
get_clock_info(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7464 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7465 u16 len)
7466 {
7467 struct mgmt_cp_get_clock_info *cp = data;
7468 struct mgmt_rp_get_clock_info rp;
7469 struct mgmt_pending_cmd *cmd;
7470 struct hci_conn *conn;
7471 int err;
7472
7473 bt_dev_dbg(hdev, "sock %p", sk);
7474
7475 memset(&rp, 0, sizeof(rp));
7476 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7477 rp.addr.type = cp->addr.type;
7478
7479 if (cp->addr.type != BDADDR_BREDR)
7480 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7481 MGMT_STATUS_INVALID_PARAMS,
7482 &rp, sizeof(rp));
7483
7484 hci_dev_lock(hdev);
7485
7486 if (!hdev_is_powered(hdev)) {
7487 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7488 MGMT_STATUS_NOT_POWERED, &rp,
7489 sizeof(rp));
7490 goto unlock;
7491 }
7492
7493 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7494 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7495 &cp->addr.bdaddr);
7496 if (!conn || conn->state != BT_CONNECTED) {
7497 err = mgmt_cmd_complete(sk, hdev->id,
7498 MGMT_OP_GET_CLOCK_INFO,
7499 MGMT_STATUS_NOT_CONNECTED,
7500 &rp, sizeof(rp));
7501 goto unlock;
7502 }
7503 } else {
7504 conn = NULL;
7505 }
7506
7507 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7508 if (!cmd)
7509 err = -ENOMEM;
7510 else
7511 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7512 get_clock_info_complete);
7513
7514 if (err < 0) {
7515 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7516 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7517
7518 if (cmd)
7519 mgmt_pending_free(cmd);
7520 }
7521
7522
7523 unlock:
7524 hci_dev_unlock(hdev);
7525 return err;
7526 }
7527
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)7528 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7529 {
7530 struct hci_conn *conn;
7531
7532 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7533 if (!conn)
7534 return false;
7535
7536 if (conn->dst_type != type)
7537 return false;
7538
7539 if (conn->state != BT_CONNECTED)
7540 return false;
7541
7542 return true;
7543 }
7544
7545 /* This function requires the caller holds hdev->lock */
hci_conn_params_set(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type,u8 auto_connect)7546 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7547 u8 addr_type, u8 auto_connect)
7548 {
7549 struct hci_conn_params *params;
7550
7551 params = hci_conn_params_add(hdev, addr, addr_type);
7552 if (!params)
7553 return -EIO;
7554
7555 if (params->auto_connect == auto_connect)
7556 return 0;
7557
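	/* Detach the entry from any pending list first; the switch below
	 * re-adds it to the list matching the new auto_connect policy.
	 */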
7558 hci_pend_le_list_del_init(params);
7559
7560 switch (auto_connect) {
7561 case HCI_AUTO_CONN_DISABLED:
7562 case HCI_AUTO_CONN_LINK_LOSS:
7563 /* If auto connect is being disabled while we're trying to
7564 * connect to a device, keep connecting.
7565 */
7566 if (params->explicit_connect)
7567 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7568 break;
7569 case HCI_AUTO_CONN_REPORT:
7570 if (params->explicit_connect)
7571 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7572 else
7573 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7574 break;
7575 case HCI_AUTO_CONN_DIRECT:
7576 case HCI_AUTO_CONN_ALWAYS:
7577 if (!is_connected(hdev, addr, addr_type))
7578 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7579 break;
7580 }
7581
7582 params->auto_connect = auto_connect;
7583
7584 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7585 addr, addr_type, auto_connect);
7586
7587 return 0;
7588 }
7589
7590 static void device_added(struct sock *sk, struct hci_dev *hdev,
7591 bdaddr_t *bdaddr, u8 type, u8 action)
7592 {
7593 struct mgmt_ev_device_added ev;
7594
7595 bacpy(&ev.addr.bdaddr, bdaddr);
7596 ev.addr.type = type;
7597 ev.action = action;
7598
7599 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7600 }
7601
7602 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7603 {
7604 struct mgmt_pending_cmd *cmd = data;
7605 struct mgmt_cp_add_device *cp = cmd->param;
7606
7607 if (!err) {
7608 device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7609 cp->action);
7610 device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7611 cp->addr.type, hdev->conn_flags,
7612 PTR_UINT(cmd->user_data));
7613 }
7614
7615 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7616 mgmt_status(err), &cp->addr, sizeof(cp->addr));
7617 mgmt_pending_free(cmd);
7618 }
7619
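/* Adding a device only affects the passive scan parameters here, hence
 * the plain passive scan resync below.
 */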
7620 static int add_device_sync(struct hci_dev *hdev, void *data)
7621 {
7622 return hci_update_passive_scan_sync(hdev);
7623 }
7624
7625 static int add_device(struct sock *sk, struct hci_dev *hdev,
7626 void *data, u16 len)
7627 {
7628 struct mgmt_pending_cmd *cmd;
7629 struct mgmt_cp_add_device *cp = data;
7630 u8 auto_conn, addr_type;
7631 struct hci_conn_params *params;
7632 int err;
7633 u32 current_flags = 0;
7634 u32 supported_flags;
7635
7636 bt_dev_dbg(hdev, "sock %p", sk);
7637
7638 if (!bdaddr_type_is_valid(cp->addr.type) ||
7639 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7640 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7641 MGMT_STATUS_INVALID_PARAMS,
7642 &cp->addr, sizeof(cp->addr));
7643
7644 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7645 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7646 MGMT_STATUS_INVALID_PARAMS,
7647 &cp->addr, sizeof(cp->addr));
7648
7649 hci_dev_lock(hdev);
7650
7651 if (cp->addr.type == BDADDR_BREDR) {
7652 /* Only the incoming-connection action is supported for now */
7653 if (cp->action != 0x01) {
7654 err = mgmt_cmd_complete(sk, hdev->id,
7655 MGMT_OP_ADD_DEVICE,
7656 MGMT_STATUS_INVALID_PARAMS,
7657 &cp->addr, sizeof(cp->addr));
7658 goto unlock;
7659 }
7660
7661 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7662 &cp->addr.bdaddr,
7663 cp->addr.type, 0);
7664 if (err)
7665 goto unlock;
7666
7667 hci_update_scan(hdev);
7668
7669 goto added;
7670 }
7671
7672 addr_type = le_addr_type(cp->addr.type);
7673
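	/* Map the mgmt Add Device action onto the kernel auto-connect
	 * policy: 0x02 connects whenever the device is seen, 0x01 issues
	 * direct connection attempts, and 0x00 merely reports scan results.
	 */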
7674 if (cp->action == 0x02)
7675 auto_conn = HCI_AUTO_CONN_ALWAYS;
7676 else if (cp->action == 0x01)
7677 auto_conn = HCI_AUTO_CONN_DIRECT;
7678 else
7679 auto_conn = HCI_AUTO_CONN_REPORT;
7680
7681 /* Kernel internally uses conn_params with resolvable private
7682 * address, but Add Device allows only identity addresses.
7683 * Make sure it is enforced before calling
7684 * hci_conn_params_lookup.
7685 */
7686 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7687 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7688 MGMT_STATUS_INVALID_PARAMS,
7689 &cp->addr, sizeof(cp->addr));
7690 goto unlock;
7691 }
7692
7693 /* If the connection parameters don't exist for this device,
7694 * they will be created and configured with defaults.
7695 */
7696 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7697 auto_conn) < 0) {
7698 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7699 MGMT_STATUS_FAILED, &cp->addr,
7700 sizeof(cp->addr));
7701 goto unlock;
7702 } else {
7703 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7704 addr_type);
7705 if (params)
7706 current_flags = params->flags;
7707 }
7708
7709 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7710 if (!cmd) {
7711 err = -ENOMEM;
7712 goto unlock;
7713 }
7714
7715 cmd->user_data = UINT_PTR(current_flags);
7716
7717 err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
7718 add_device_complete);
7719 if (err < 0) {
7720 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7721 MGMT_STATUS_FAILED, &cp->addr,
7722 sizeof(cp->addr));
7723 mgmt_pending_free(cmd);
7724 }
7725
7726 goto unlock;
7727
7728 added:
7729 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7730 supported_flags = hdev->conn_flags;
7731 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7732 supported_flags, current_flags);
7733
7734 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7735 MGMT_STATUS_SUCCESS, &cp->addr,
7736 sizeof(cp->addr));
7737
7738 unlock:
7739 hci_dev_unlock(hdev);
7740 return err;
7741 }
7742
7743 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7744 bdaddr_t *bdaddr, u8 type)
7745 {
7746 struct mgmt_ev_device_removed ev;
7747
7748 bacpy(&ev.addr.bdaddr, bdaddr);
7749 ev.addr.type = type;
7750
7751 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7752 }
7753
7754 static int remove_device_sync(struct hci_dev *hdev, void *data)
7755 {
7756 return hci_update_passive_scan_sync(hdev);
7757 }
7758
7759 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7760 void *data, u16 len)
7761 {
7762 struct mgmt_cp_remove_device *cp = data;
7763 int err;
7764
7765 bt_dev_dbg(hdev, "sock %p", sk);
7766
7767 hci_dev_lock(hdev);
7768
7769 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7770 struct hci_conn_params *params;
7771 u8 addr_type;
7772
7773 if (!bdaddr_type_is_valid(cp->addr.type)) {
7774 err = mgmt_cmd_complete(sk, hdev->id,
7775 MGMT_OP_REMOVE_DEVICE,
7776 MGMT_STATUS_INVALID_PARAMS,
7777 &cp->addr, sizeof(cp->addr));
7778 goto unlock;
7779 }
7780
7781 if (cp->addr.type == BDADDR_BREDR) {
7782 err = hci_bdaddr_list_del(&hdev->accept_list,
7783 &cp->addr.bdaddr,
7784 cp->addr.type);
7785 if (err) {
7786 err = mgmt_cmd_complete(sk, hdev->id,
7787 MGMT_OP_REMOVE_DEVICE,
7788 MGMT_STATUS_INVALID_PARAMS,
7789 &cp->addr,
7790 sizeof(cp->addr));
7791 goto unlock;
7792 }
7793
7794 hci_update_scan(hdev);
7795
7796 device_removed(sk, hdev, &cp->addr.bdaddr,
7797 cp->addr.type);
7798 goto complete;
7799 }
7800
7801 addr_type = le_addr_type(cp->addr.type);
7802
7803 /* Kernel internally uses conn_params with resolvable private
7804 * address, but Remove Device allows only identity addresses.
7805 * Make sure it is enforced before calling
7806 * hci_conn_params_lookup.
7807 */
7808 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7809 err = mgmt_cmd_complete(sk, hdev->id,
7810 MGMT_OP_REMOVE_DEVICE,
7811 MGMT_STATUS_INVALID_PARAMS,
7812 &cp->addr, sizeof(cp->addr));
7813 goto unlock;
7814 }
7815
7816 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7817 addr_type);
7818 if (!params) {
7819 err = mgmt_cmd_complete(sk, hdev->id,
7820 MGMT_OP_REMOVE_DEVICE,
7821 MGMT_STATUS_INVALID_PARAMS,
7822 &cp->addr, sizeof(cp->addr));
7823 goto unlock;
7824 }
7825
7826 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7827 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7828 err = mgmt_cmd_complete(sk, hdev->id,
7829 MGMT_OP_REMOVE_DEVICE,
7830 MGMT_STATUS_INVALID_PARAMS,
7831 &cp->addr, sizeof(cp->addr));
7832 goto unlock;
7833 }
7834
7835 hci_conn_params_free(params);
7836
7837 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7838 } else {
7839 struct hci_conn_params *p, *tmp;
7840 struct bdaddr_list *b, *btmp;
7841
7842 if (cp->addr.type) {
7843 err = mgmt_cmd_complete(sk, hdev->id,
7844 MGMT_OP_REMOVE_DEVICE,
7845 MGMT_STATUS_INVALID_PARAMS,
7846 &cp->addr, sizeof(cp->addr));
7847 goto unlock;
7848 }
7849
7850 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7851 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7852 list_del(&b->list);
7853 kfree(b);
7854 }
7855
7856 hci_update_scan(hdev);
7857
7858 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7859 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7860 continue;
7861 device_removed(sk, hdev, &p->addr, p->addr_type);
7862 if (p->explicit_connect) {
7863 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7864 continue;
7865 }
7866 hci_conn_params_free(p);
7867 }
7868
7869 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7870 }
7871
7872 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7873
7874 complete:
7875 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7876 MGMT_STATUS_SUCCESS, &cp->addr,
7877 sizeof(cp->addr));
7878 unlock:
7879 hci_dev_unlock(hdev);
7880 return err;
7881 }
7882
7883 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7884 u16 len)
7885 {
7886 struct mgmt_cp_load_conn_param *cp = data;
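	/* Upper bound on param_count so that the expected message length
	 * (header plus params array) still fits in a u16.
	 */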
7887 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7888 sizeof(struct mgmt_conn_param));
7889 u16 param_count, expected_len;
7890 int i;
7891
7892 if (!lmp_le_capable(hdev))
7893 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7894 MGMT_STATUS_NOT_SUPPORTED);
7895
7896 param_count = __le16_to_cpu(cp->param_count);
7897 if (param_count > max_param_count) {
7898 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7899 param_count);
7900 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7901 MGMT_STATUS_INVALID_PARAMS);
7902 }
7903
7904 expected_len = struct_size(cp, params, param_count);
7905 if (expected_len != len) {
7906 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7907 expected_len, len);
7908 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7909 MGMT_STATUS_INVALID_PARAMS);
7910 }
7911
7912 bt_dev_dbg(hdev, "param_count %u", param_count);
7913
7914 hci_dev_lock(hdev);
7915
7916 hci_conn_params_clear_disabled(hdev);
7917
7918 for (i = 0; i < param_count; i++) {
7919 struct mgmt_conn_param *param = &cp->params[i];
7920 struct hci_conn_params *hci_param;
7921 u16 min, max, latency, timeout;
7922 u8 addr_type;
7923
7924 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7925 param->addr.type);
7926
7927 if (param->addr.type == BDADDR_LE_PUBLIC) {
7928 addr_type = ADDR_LE_DEV_PUBLIC;
7929 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7930 addr_type = ADDR_LE_DEV_RANDOM;
7931 } else {
7932 bt_dev_err(hdev, "ignoring invalid connection parameters");
7933 continue;
7934 }
7935
7936 min = le16_to_cpu(param->min_interval);
7937 max = le16_to_cpu(param->max_interval);
7938 latency = le16_to_cpu(param->latency);
7939 timeout = le16_to_cpu(param->timeout);
7940
7941 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7942 min, max, latency, timeout);
7943
7944 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7945 bt_dev_err(hdev, "ignoring invalid connection parameters");
7946 continue;
7947 }
7948
7949 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7950 addr_type);
7951 if (!hci_param) {
7952 bt_dev_err(hdev, "failed to add connection parameters");
7953 continue;
7954 }
7955
7956 hci_param->conn_min_interval = min;
7957 hci_param->conn_max_interval = max;
7958 hci_param->conn_latency = latency;
7959 hci_param->supervision_timeout = timeout;
7960 }
7961
7962 hci_dev_unlock(hdev);
7963
7964 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7965 NULL, 0);
7966 }
7967
7968 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7969 void *data, u16 len)
7970 {
7971 struct mgmt_cp_set_external_config *cp = data;
7972 bool changed;
7973 int err;
7974
7975 bt_dev_dbg(hdev, "sock %p", sk);
7976
7977 if (hdev_is_powered(hdev))
7978 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7979 MGMT_STATUS_REJECTED);
7980
7981 if (cp->config != 0x00 && cp->config != 0x01)
7982 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7983 MGMT_STATUS_INVALID_PARAMS);
7984
7985 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7986 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7987 MGMT_STATUS_NOT_SUPPORTED);
7988
7989 hci_dev_lock(hdev);
7990
7991 if (cp->config)
7992 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7993 else
7994 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7995
7996 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7997 if (err < 0)
7998 goto unlock;
7999
8000 if (!changed)
8001 goto unlock;
8002
8003 err = new_options(hdev, sk);
8004
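/* Re-register the index when the UNCONFIGURED flag no longer matches
 * the controller's actual configuration state.
 */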
8005 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8006 mgmt_index_removed(hdev);
8007
8008 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8009 hci_dev_set_flag(hdev, HCI_CONFIG);
8010 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8011
8012 queue_work(hdev->req_workqueue, &hdev->power_on);
8013 } else {
8014 set_bit(HCI_RAW, &hdev->flags);
8015 mgmt_index_added(hdev);
8016 }
8017 }
8018
8019 unlock:
8020 hci_dev_unlock(hdev);
8021 return err;
8022 }
8023
8024 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8025 void *data, u16 len)
8026 {
8027 struct mgmt_cp_set_public_address *cp = data;
8028 bool changed;
8029 int err;
8030
8031 bt_dev_dbg(hdev, "sock %p", sk);
8032
8033 if (hdev_is_powered(hdev))
8034 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8035 MGMT_STATUS_REJECTED);
8036
8037 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8038 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8039 MGMT_STATUS_INVALID_PARAMS);
8040
8041 if (!hdev->set_bdaddr)
8042 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8043 MGMT_STATUS_NOT_SUPPORTED);
8044
8045 hci_dev_lock(hdev);
8046
8047 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8048 bacpy(&hdev->public_addr, &cp->bdaddr);
8049
8050 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8051 if (err < 0)
8052 goto unlock;
8053
8054 if (!changed)
8055 goto unlock;
8056
8057 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8058 err = new_options(hdev, sk);
8059
8060 if (is_configured(hdev)) {
8061 mgmt_index_removed(hdev);
8062
8063 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8064
8065 hci_dev_set_flag(hdev, HCI_CONFIG);
8066 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8067
8068 queue_work(hdev->req_workqueue, &hdev->power_on);
8069 }
8070
8071 unlock:
8072 hci_dev_unlock(hdev);
8073 return err;
8074 }
8075
8076 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8077 int err)
8078 {
8079 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8080 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8081 u8 *h192, *r192, *h256, *r256;
8082 struct mgmt_pending_cmd *cmd = data;
8083 struct sk_buff *skb = cmd->skb;
8084 u8 status = mgmt_status(err);
8085 u16 eir_len;
8086
8087 if (err == -ECANCELED ||
8088 cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8089 return;
8090
8091 if (!status) {
8092 if (!skb)
8093 status = MGMT_STATUS_FAILED;
8094 else if (IS_ERR(skb))
8095 status = mgmt_status(PTR_ERR(skb));
8096 else
8097 status = mgmt_status(skb->data[0]);
8098 }
8099
8100 bt_dev_dbg(hdev, "status %u", status);
8101
8102 mgmt_cp = cmd->param;
8103
8104 if (status) {
8105 status = mgmt_status(status);
8106 eir_len = 0;
8107
8108 h192 = NULL;
8109 r192 = NULL;
8110 h256 = NULL;
8111 r256 = NULL;
8112 } else if (!bredr_sc_enabled(hdev)) {
8113 struct hci_rp_read_local_oob_data *rp;
8114
8115 if (skb->len != sizeof(*rp)) {
8116 status = MGMT_STATUS_FAILED;
8117 eir_len = 0;
8118 } else {
8119 status = MGMT_STATUS_SUCCESS;
8120 rp = (void *)skb->data;
8121
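			/* Each EIR field carries a 2 byte header: 5 covers
			 * the 3 byte Class of Device, 18 each the 16 byte
			 * C-192 hash and R-192 randomizer.
			 */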
8122 eir_len = 5 + 18 + 18;
8123 h192 = rp->hash;
8124 r192 = rp->rand;
8125 h256 = NULL;
8126 r256 = NULL;
8127 }
8128 } else {
8129 struct hci_rp_read_local_oob_ext_data *rp;
8130
8131 if (skb->len != sizeof(*rp)) {
8132 status = MGMT_STATUS_FAILED;
8133 eir_len = 0;
8134 } else {
8135 status = MGMT_STATUS_SUCCESS;
8136 rp = (void *)skb->data;
8137
8138 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8139 eir_len = 5 + 18 + 18;
8140 h192 = NULL;
8141 r192 = NULL;
8142 } else {
8143 eir_len = 5 + 18 + 18 + 18 + 18;
8144 h192 = rp->hash192;
8145 r192 = rp->rand192;
8146 }
8147
8148 h256 = rp->hash256;
8149 r256 = rp->rand256;
8150 }
8151 }
8152
8153 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8154 if (!mgmt_rp)
8155 goto done;
8156
8157 if (eir_len == 0)
8158 goto send_rsp;
8159
8160 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8161 hdev->dev_class, 3);
8162
8163 if (h192 && r192) {
8164 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8165 EIR_SSP_HASH_C192, h192, 16);
8166 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8167 EIR_SSP_RAND_R192, r192, 16);
8168 }
8169
8170 if (h256 && r256) {
8171 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8172 EIR_SSP_HASH_C256, h256, 16);
8173 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8174 EIR_SSP_RAND_R256, r256, 16);
8175 }
8176
8177 send_rsp:
8178 mgmt_rp->type = mgmt_cp->type;
8179 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8180
8181 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8182 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8183 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8184 if (err < 0 || status)
8185 goto done;
8186
8187 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8188
8189 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8190 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8191 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8192 done:
8193 if (skb && !IS_ERR(skb))
8194 kfree_skb(skb);
8195
8196 kfree(mgmt_rp);
8197 mgmt_pending_remove(cmd);
8198 }
8199
8200 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8201 struct mgmt_cp_read_local_oob_ext_data *cp)
8202 {
8203 struct mgmt_pending_cmd *cmd;
8204 int err;
8205
8206 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8207 cp, sizeof(*cp));
8208 if (!cmd)
8209 return -ENOMEM;
8210
8211 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8212 read_local_oob_ext_data_complete);
8213
8214 if (err < 0) {
8215 mgmt_pending_remove(cmd);
8216 return err;
8217 }
8218
8219 return 0;
8220 }
8221
8222 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8223 void *data, u16 data_len)
8224 {
8225 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8226 struct mgmt_rp_read_local_oob_ext_data *rp;
8227 size_t rp_len;
8228 u16 eir_len;
8229 u8 status, flags, role, addr[7], hash[16], rand[16];
8230 int err;
8231
8232 bt_dev_dbg(hdev, "sock %p", sk);
8233
8234 if (hdev_is_powered(hdev)) {
8235 switch (cp->type) {
8236 case BIT(BDADDR_BREDR):
8237 status = mgmt_bredr_support(hdev);
8238 if (status)
8239 eir_len = 0;
8240 else
8241 eir_len = 5;
8242 break;
8243 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
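		/* The 9 + 3 + 18 + 18 + 3 below accounts for the LE address
		 * (7 bytes), role (1), SC confirmation (16), SC random (16)
		 * and flags (1), each preceded by a 2 byte EIR header.
		 */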
8244 status = mgmt_le_support(hdev);
8245 if (status)
8246 eir_len = 0;
8247 else
8248 eir_len = 9 + 3 + 18 + 18 + 3;
8249 break;
8250 default:
8251 status = MGMT_STATUS_INVALID_PARAMS;
8252 eir_len = 0;
8253 break;
8254 }
8255 } else {
8256 status = MGMT_STATUS_NOT_POWERED;
8257 eir_len = 0;
8258 }
8259
8260 rp_len = sizeof(*rp) + eir_len;
8261 rp = kmalloc(rp_len, GFP_ATOMIC);
8262 if (!rp)
8263 return -ENOMEM;
8264
8265 if (!status && !lmp_ssp_capable(hdev)) {
8266 status = MGMT_STATUS_NOT_SUPPORTED;
8267 eir_len = 0;
8268 }
8269
8270 if (status)
8271 goto complete;
8272
8273 hci_dev_lock(hdev);
8274
8275 eir_len = 0;
8276 switch (cp->type) {
8277 case BIT(BDADDR_BREDR):
8278 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8279 err = read_local_ssp_oob_req(hdev, sk, cp);
8280 hci_dev_unlock(hdev);
8281 if (!err)
8282 goto done;
8283
8284 status = MGMT_STATUS_FAILED;
8285 goto complete;
8286 } else {
8287 eir_len = eir_append_data(rp->eir, eir_len,
8288 EIR_CLASS_OF_DEV,
8289 hdev->dev_class, 3);
8290 }
8291 break;
8292 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8293 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8294 smp_generate_oob(hdev, hash, rand) < 0) {
8295 hci_dev_unlock(hdev);
8296 status = MGMT_STATUS_FAILED;
8297 goto complete;
8298 }
8299
8300 /* This should return the active RPA, but since the RPA
8301 * is only programmed on demand, it is really hard to fill
8302 * this in at the moment. For now disallow retrieving
8303 * local out-of-band data when privacy is in use.
8304 *
8305 * Returning the identity address will not help here since
8306 * pairing happens before the identity resolving key is
8307 * known and thus the connection establishment happens
8308 * based on the RPA and not the identity address.
8309 */
8310 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8311 hci_dev_unlock(hdev);
8312 status = MGMT_STATUS_REJECTED;
8313 goto complete;
8314 }
8315
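		/* addr[6] carries the address type for the peer: 0x01 when
		 * the static random address is used, 0x00 for the public
		 * address.
		 */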
8316 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8317 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8318 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8319 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8320 memcpy(addr, &hdev->static_addr, 6);
8321 addr[6] = 0x01;
8322 } else {
8323 memcpy(addr, &hdev->bdaddr, 6);
8324 addr[6] = 0x00;
8325 }
8326
8327 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8328 addr, sizeof(addr));
8329
8330 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8331 role = 0x02;
8332 else
8333 role = 0x01;
8334
8335 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8336 &role, sizeof(role));
8337
8338 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8339 eir_len = eir_append_data(rp->eir, eir_len,
8340 EIR_LE_SC_CONFIRM,
8341 hash, sizeof(hash));
8342
8343 eir_len = eir_append_data(rp->eir, eir_len,
8344 EIR_LE_SC_RANDOM,
8345 rand, sizeof(rand));
8346 }
8347
8348 flags = mgmt_get_adv_discov_flags(hdev);
8349
8350 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8351 flags |= LE_AD_NO_BREDR;
8352
8353 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8354 &flags, sizeof(flags));
8355 break;
8356 }
8357
8358 hci_dev_unlock(hdev);
8359
8360 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8361
8362 status = MGMT_STATUS_SUCCESS;
8363
8364 complete:
8365 rp->type = cp->type;
8366 rp->eir_len = cpu_to_le16(eir_len);
8367
8368 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8369 status, rp, sizeof(*rp) + eir_len);
8370 if (err < 0 || status)
8371 goto done;
8372
8373 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8374 rp, sizeof(*rp) + eir_len,
8375 HCI_MGMT_OOB_DATA_EVENTS, sk);
8376
8377 done:
8378 kfree(rp);
8379
8380 return err;
8381 }
8382
8383 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8384 {
8385 u32 flags = 0;
8386
8387 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8388 flags |= MGMT_ADV_FLAG_DISCOV;
8389 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8390 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8391 flags |= MGMT_ADV_FLAG_APPEARANCE;
8392 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8393 flags |= MGMT_ADV_PARAM_DURATION;
8394 flags |= MGMT_ADV_PARAM_TIMEOUT;
8395 flags |= MGMT_ADV_PARAM_INTERVALS;
8396 flags |= MGMT_ADV_PARAM_TX_POWER;
8397 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8398
8399 /* With extended advertising, the TX_POWER returned from
8400 * Set Adv Param will always be valid.
8401 */
8402 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8403 flags |= MGMT_ADV_FLAG_TX_POWER;
8404
8405 if (ext_adv_capable(hdev)) {
8406 flags |= MGMT_ADV_FLAG_SEC_1M;
8407 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8408 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8409
8410 if (le_2m_capable(hdev))
8411 flags |= MGMT_ADV_FLAG_SEC_2M;
8412
8413 if (le_coded_capable(hdev))
8414 flags |= MGMT_ADV_FLAG_SEC_CODED;
8415 }
8416
8417 return flags;
8418 }
8419
8420 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8421 void *data, u16 data_len)
8422 {
8423 struct mgmt_rp_read_adv_features *rp;
8424 size_t rp_len;
8425 int err;
8426 struct adv_info *adv_instance;
8427 u32 supported_flags;
8428 u8 *instance;
8429
8430 bt_dev_dbg(hdev, "sock %p", sk);
8431
8432 if (!lmp_le_capable(hdev))
8433 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8434 MGMT_STATUS_REJECTED);
8435
8436 hci_dev_lock(hdev);
8437
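	/* The reply carries one instance identifier byte per advertising
	 * instance after the fixed header.
	 */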
8438 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8439 rp = kmalloc(rp_len, GFP_ATOMIC);
8440 if (!rp) {
8441 hci_dev_unlock(hdev);
8442 return -ENOMEM;
8443 }
8444
8445 supported_flags = get_supported_adv_flags(hdev);
8446
8447 rp->supported_flags = cpu_to_le32(supported_flags);
8448 rp->max_adv_data_len = max_adv_len(hdev);
8449 rp->max_scan_rsp_len = max_adv_len(hdev);
8450 rp->max_instances = hdev->le_num_of_adv_sets;
8451 rp->num_instances = hdev->adv_instance_cnt;
8452
8453 instance = rp->instance;
8454 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8455 /* Only instances 1-le_num_of_adv_sets are externally visible */
8456 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8457 *instance = adv_instance->instance;
8458 instance++;
8459 } else {
8460 rp->num_instances--;
8461 rp_len--;
8462 }
8463 }
8464
8465 hci_dev_unlock(hdev);
8466
8467 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8468 MGMT_STATUS_SUCCESS, rp, rp_len);
8469
8470 kfree(rp);
8471
8472 return err;
8473 }
8474
8475 static u8 calculate_name_len(struct hci_dev *hdev)
8476 {
8477 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8478
8479 return eir_append_local_name(hdev, buf, 0);
8480 }
8481
8482 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8483 bool is_adv_data)
8484 {
8485 u8 max_len = max_adv_len(hdev);
8486
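	/* Reserve room for the fields the kernel appends itself: flags and
	 * TX power take 3 bytes each, appearance 4, and the local name its
	 * encoded length.
	 */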
8487 if (is_adv_data) {
8488 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8489 MGMT_ADV_FLAG_LIMITED_DISCOV |
8490 MGMT_ADV_FLAG_MANAGED_FLAGS))
8491 max_len -= 3;
8492
8493 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8494 max_len -= 3;
8495 } else {
8496 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8497 max_len -= calculate_name_len(hdev);
8498
8499 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8500 max_len -= 4;
8501 }
8502
8503 return max_len;
8504 }
8505
8506 static bool flags_managed(u32 adv_flags)
8507 {
8508 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8509 MGMT_ADV_FLAG_LIMITED_DISCOV |
8510 MGMT_ADV_FLAG_MANAGED_FLAGS);
8511 }
8512
8513 static bool tx_power_managed(u32 adv_flags)
8514 {
8515 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8516 }
8517
8518 static bool name_managed(u32 adv_flags)
8519 {
8520 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8521 }
8522
8523 static bool appearance_managed(u32 adv_flags)
8524 {
8525 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8526 }
8527
8528 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8529 u8 len, bool is_adv_data)
8530 {
8531 int i, cur_len;
8532 u8 max_len;
8533
8534 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8535
8536 if (len > max_len)
8537 return false;
8538
8539 /* Make sure that the data is correctly formatted. */
8540 for (i = 0; i < len; i += (cur_len + 1)) {
8541 cur_len = data[i];
8542
8543 if (!cur_len)
8544 continue;
8545
8546 if (data[i + 1] == EIR_FLAGS &&
8547 (!is_adv_data || flags_managed(adv_flags)))
8548 return false;
8549
8550 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8551 return false;
8552
8553 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8554 return false;
8555
8556 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8557 return false;
8558
8559 if (data[i + 1] == EIR_APPEARANCE &&
8560 appearance_managed(adv_flags))
8561 return false;
8562
8563 /* If the current field length would exceed the total data
8564 * length, then it's invalid.
8565 */
8566 if (i + cur_len >= len)
8567 return false;
8568 }
8569
8570 return true;
8571 }
8572
8573 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8574 {
8575 u32 supported_flags, phy_flags;
8576
8577 /* The current implementation only supports a subset of the specified
8578 * flags. Also need to check mutual exclusiveness of sec flags.
8579 */
8580 supported_flags = get_supported_adv_flags(hdev);
8581 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
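	/* phy_flags & -phy_flags isolates the lowest set bit, so the XOR
	 * below is non-zero whenever more than one SEC_* PHY flag is set.
	 */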
8582 if (adv_flags & ~supported_flags ||
8583 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8584 return false;
8585
8586 return true;
8587 }
8588
8589 static bool adv_busy(struct hci_dev *hdev)
8590 {
8591 return pending_find(MGMT_OP_SET_LE, hdev);
8592 }
8593
8594 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8595 int err)
8596 {
8597 struct adv_info *adv, *n;
8598
8599 bt_dev_dbg(hdev, "err %d", err);
8600
8601 hci_dev_lock(hdev);
8602
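	/* On failure remove every instance that was still pending; on
	 * success just clear their pending state.
	 */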
8603 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8604 u8 instance;
8605
8606 if (!adv->pending)
8607 continue;
8608
8609 if (!err) {
8610 adv->pending = false;
8611 continue;
8612 }
8613
8614 instance = adv->instance;
8615
8616 if (hdev->cur_adv_instance == instance)
8617 cancel_adv_timeout(hdev);
8618
8619 hci_remove_adv_instance(hdev, instance);
8620 mgmt_advertising_removed(sk, hdev, instance);
8621 }
8622
8623 hci_dev_unlock(hdev);
8624 }
8625
8626 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8627 {
8628 struct mgmt_pending_cmd *cmd = data;
8629 struct mgmt_cp_add_advertising *cp = cmd->param;
8630 struct mgmt_rp_add_advertising rp;
8631
8632 memset(&rp, 0, sizeof(rp));
8633
8634 rp.instance = cp->instance;
8635
8636 if (err)
8637 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8638 mgmt_status(err));
8639 else
8640 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8641 mgmt_status(err), &rp, sizeof(rp));
8642
8643 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8644
8645 mgmt_pending_free(cmd);
8646 }
8647
8648 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8649 {
8650 struct mgmt_pending_cmd *cmd = data;
8651 struct mgmt_cp_add_advertising *cp = cmd->param;
8652
8653 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8654 }
8655
8656 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8657 void *data, u16 data_len)
8658 {
8659 struct mgmt_cp_add_advertising *cp = data;
8660 struct mgmt_rp_add_advertising rp;
8661 u32 flags;
8662 u8 status;
8663 u16 timeout, duration;
8664 unsigned int prev_instance_cnt;
8665 u8 schedule_instance = 0;
8666 struct adv_info *adv, *next_instance;
8667 int err;
8668 struct mgmt_pending_cmd *cmd;
8669
8670 bt_dev_dbg(hdev, "sock %p", sk);
8671
8672 status = mgmt_le_support(hdev);
8673 if (status)
8674 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8675 status);
8676
8677 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8678 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8679 MGMT_STATUS_INVALID_PARAMS);
8680
8681 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8682 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8683 MGMT_STATUS_INVALID_PARAMS);
8684
8685 flags = __le32_to_cpu(cp->flags);
8686 timeout = __le16_to_cpu(cp->timeout);
8687 duration = __le16_to_cpu(cp->duration);
8688
8689 if (!requested_adv_flags_are_valid(hdev, flags))
8690 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8691 MGMT_STATUS_INVALID_PARAMS);
8692
8693 hci_dev_lock(hdev);
8694
8695 if (timeout && !hdev_is_powered(hdev)) {
8696 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8697 MGMT_STATUS_REJECTED);
8698 goto unlock;
8699 }
8700
8701 if (adv_busy(hdev)) {
8702 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8703 MGMT_STATUS_BUSY);
8704 goto unlock;
8705 }
8706
8707 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8708 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8709 cp->scan_rsp_len, false)) {
8710 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8711 MGMT_STATUS_INVALID_PARAMS);
8712 goto unlock;
8713 }
8714
8715 prev_instance_cnt = hdev->adv_instance_cnt;
8716
8717 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8718 cp->adv_data_len, cp->data,
8719 cp->scan_rsp_len,
8720 cp->data + cp->adv_data_len,
8721 timeout, duration,
8722 HCI_ADV_TX_POWER_NO_PREFERENCE,
8723 hdev->le_adv_min_interval,
8724 hdev->le_adv_max_interval, 0);
8725 if (IS_ERR(adv)) {
8726 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8727 MGMT_STATUS_FAILED);
8728 goto unlock;
8729 }
8730
8731 /* Only trigger an advertising added event if a new instance was
8732 * actually added.
8733 */
8734 if (hdev->adv_instance_cnt > prev_instance_cnt)
8735 mgmt_advertising_added(sk, hdev, cp->instance);
8736
8737 if (hdev->cur_adv_instance == cp->instance) {
8738 /* If the currently advertised instance is being changed then
8739 * cancel the current advertising and schedule the next
8740 * instance. If there is only one instance then the overridden
8741 * advertising data will be visible right away.
8742 */
8743 cancel_adv_timeout(hdev);
8744
8745 next_instance = hci_get_next_instance(hdev, cp->instance);
8746 if (next_instance)
8747 schedule_instance = next_instance->instance;
8748 } else if (!hdev->adv_instance_timeout) {
8749 /* Immediately advertise the new instance if no other
8750 * instance is currently being advertised.
8751 */
8752 schedule_instance = cp->instance;
8753 }
8754
8755 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8756 * there is no instance to be advertised then we have no HCI
8757 * communication to make. Simply return.
8758 */
8759 if (!hdev_is_powered(hdev) ||
8760 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8761 !schedule_instance) {
8762 rp.instance = cp->instance;
8763 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8764 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8765 goto unlock;
8766 }
8767
8768 /* We're good to go, update advertising data, parameters, and start
8769 * advertising.
8770 */
8771 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8772 data_len);
8773 if (!cmd) {
8774 err = -ENOMEM;
8775 goto unlock;
8776 }
8777
8778 cp->instance = schedule_instance;
8779
8780 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8781 add_advertising_complete);
8782 if (err < 0)
8783 mgmt_pending_free(cmd);
8784
8785 unlock:
8786 hci_dev_unlock(hdev);
8787
8788 return err;
8789 }
8790
8791 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8792 int err)
8793 {
8794 struct mgmt_pending_cmd *cmd = data;
8795 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8796 struct mgmt_rp_add_ext_adv_params rp;
8797 struct adv_info *adv;
8798 u32 flags;
8799
8800 BT_DBG("%s", hdev->name);
8801
8802 hci_dev_lock(hdev);
8803
8804 adv = hci_find_adv_instance(hdev, cp->instance);
8805 if (!adv)
8806 goto unlock;
8807
8808 rp.instance = cp->instance;
8809 rp.tx_power = adv->tx_power;
8810
8811 /* While we're at it, inform userspace of the available space for this
8812 * advertisement, given the flags that will be used.
8813 */
8814 flags = __le32_to_cpu(cp->flags);
8815 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8816 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8817
8818 if (err) {
8819 /* If this advertisement was previously advertising and we
8820 * failed to update it, we signal that it has been removed and
8821 * delete its structure
8822 */
8823 if (!adv->pending)
8824 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8825
8826 hci_remove_adv_instance(hdev, cp->instance);
8827
8828 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8829 mgmt_status(err));
8830 } else {
8831 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8832 mgmt_status(err), &rp, sizeof(rp));
8833 }
8834
8835 unlock:
8836 if (cmd)
8837 mgmt_pending_free(cmd);
8838
8839 hci_dev_unlock(hdev);
8840 }
8841
8842 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8843 {
8844 struct mgmt_pending_cmd *cmd = data;
8845 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8846
8847 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8848 }
8849
8850 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8851 void *data, u16 data_len)
8852 {
8853 struct mgmt_cp_add_ext_adv_params *cp = data;
8854 struct mgmt_rp_add_ext_adv_params rp;
8855 struct mgmt_pending_cmd *cmd = NULL;
8856 struct adv_info *adv;
8857 u32 flags, min_interval, max_interval;
8858 u16 timeout, duration;
8859 u8 status;
8860 s8 tx_power;
8861 int err;
8862
8863 BT_DBG("%s", hdev->name);
8864
8865 status = mgmt_le_support(hdev);
8866 if (status)
8867 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8868 status);
8869
8870 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8871 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8872 MGMT_STATUS_INVALID_PARAMS);
8873
8874 /* The purpose of breaking add_advertising into two separate MGMT calls
8875 * for params and data is to allow more parameters to be added to this
8876 * structure in the future. For this reason, we verify that we have
8877 * at least the bare minimum structure size known when the interface
8878 * was defined; any extra parameters in the request are ignored.
8879 */
8880 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8881 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8882 MGMT_STATUS_INVALID_PARAMS);
8883
8884 flags = __le32_to_cpu(cp->flags);
8885
8886 if (!requested_adv_flags_are_valid(hdev, flags))
8887 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8888 MGMT_STATUS_INVALID_PARAMS);
8889
8890 hci_dev_lock(hdev);
8891
8892 /* In the new interface, we require that we are powered to register */
8893 if (!hdev_is_powered(hdev)) {
8894 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8895 MGMT_STATUS_REJECTED);
8896 goto unlock;
8897 }
8898
8899 if (adv_busy(hdev)) {
8900 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8901 MGMT_STATUS_BUSY);
8902 goto unlock;
8903 }
8904
8905 /* Parse defined parameters from request, use defaults otherwise */
8906 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8907 __le16_to_cpu(cp->timeout) : 0;
8908
8909 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8910 __le16_to_cpu(cp->duration) :
8911 hdev->def_multi_adv_rotation_duration;
8912
8913 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8914 __le32_to_cpu(cp->min_interval) :
8915 hdev->le_adv_min_interval;
8916
8917 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8918 __le32_to_cpu(cp->max_interval) :
8919 hdev->le_adv_max_interval;
8920
8921 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8922 cp->tx_power :
8923 HCI_ADV_TX_POWER_NO_PREFERENCE;
8924
8925 /* Create advertising instance with no advertising or response data */
8926 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8927 timeout, duration, tx_power, min_interval,
8928 max_interval, 0);
8929
8930 if (IS_ERR(adv)) {
8931 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8932 MGMT_STATUS_FAILED);
8933 goto unlock;
8934 }
8935
8936 /* Submit request for advertising params if ext adv available */
8937 if (ext_adv_capable(hdev)) {
8938 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8939 data, data_len);
8940 if (!cmd) {
8941 err = -ENOMEM;
8942 hci_remove_adv_instance(hdev, cp->instance);
8943 goto unlock;
8944 }
8945
8946 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8947 add_ext_adv_params_complete);
8948 if (err < 0)
8949 mgmt_pending_free(cmd);
8950 } else {
8951 rp.instance = cp->instance;
8952 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8953 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8954 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8955 err = mgmt_cmd_complete(sk, hdev->id,
8956 MGMT_OP_ADD_EXT_ADV_PARAMS,
8957 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8958 }
8959
8960 unlock:
8961 hci_dev_unlock(hdev);
8962
8963 return err;
8964 }
8965
8966 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8967 {
8968 struct mgmt_pending_cmd *cmd = data;
8969 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8970 struct mgmt_rp_add_advertising rp;
8971
8972 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8973
8974 memset(&rp, 0, sizeof(rp));
8975
8976 rp.instance = cp->instance;
8977
8978 if (err)
8979 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8980 mgmt_status(err));
8981 else
8982 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8983 mgmt_status(err), &rp, sizeof(rp));
8984
8985 mgmt_pending_free(cmd);
8986 }
8987
8988 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8989 {
8990 struct mgmt_pending_cmd *cmd = data;
8991 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8992 int err;
8993
8994 if (ext_adv_capable(hdev)) {
8995 err = hci_update_adv_data_sync(hdev, cp->instance);
8996 if (err)
8997 return err;
8998
8999 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
9000 if (err)
9001 return err;
9002
9003 return hci_enable_ext_advertising_sync(hdev, cp->instance);
9004 }
9005
9006 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9007 }
9008
9009 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9010 u16 data_len)
9011 {
9012 struct mgmt_cp_add_ext_adv_data *cp = data;
9013 struct mgmt_rp_add_ext_adv_data rp;
9014 u8 schedule_instance = 0;
9015 struct adv_info *next_instance;
9016 struct adv_info *adv_instance;
9017 int err = 0;
9018 struct mgmt_pending_cmd *cmd;
9019
9020 BT_DBG("%s", hdev->name);
9021
9022 hci_dev_lock(hdev);
9023
9024 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9025
9026 if (!adv_instance) {
9027 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9028 MGMT_STATUS_INVALID_PARAMS);
9029 goto unlock;
9030 }
9031
9032 /* In the new interface, we require that we are powered to register */
9033 if (!hdev_is_powered(hdev)) {
9034 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9035 MGMT_STATUS_REJECTED);
9036 goto clear_new_instance;
9037 }
9038
9039 if (adv_busy(hdev)) {
9040 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9041 MGMT_STATUS_BUSY);
9042 goto clear_new_instance;
9043 }
9044
9045 /* Validate new data */
9046 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9047 cp->adv_data_len, true) ||
9048 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9049 cp->adv_data_len, cp->scan_rsp_len, false)) {
9050 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9051 MGMT_STATUS_INVALID_PARAMS);
9052 goto clear_new_instance;
9053 }
9054
9055 /* Set the data in the advertising instance */
9056 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9057 cp->data, cp->scan_rsp_len,
9058 cp->data + cp->adv_data_len);
9059
9060 /* If using software rotation, determine next instance to use */
9061 if (hdev->cur_adv_instance == cp->instance) {
9062 /* If the currently advertised instance is being changed
9063 * then cancel the current advertising and schedule the
9064 * next instance. If there is only one instance then the
9065 * overridden advertising data will be visible right
9066 * away
9067 */
9068 cancel_adv_timeout(hdev);
9069
9070 next_instance = hci_get_next_instance(hdev, cp->instance);
9071 if (next_instance)
9072 schedule_instance = next_instance->instance;
9073 } else if (!hdev->adv_instance_timeout) {
9074 /* Immediately advertise the new instance if no other
9075 * instance is currently being advertised.
9076 */
9077 schedule_instance = cp->instance;
9078 }
9079
9080 /* If the HCI_ADVERTISING flag is set or there is no instance to
9081 * be advertised then we have no HCI communication to make.
9082 * Simply return.
9083 */
9084 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9085 if (adv_instance->pending) {
9086 mgmt_advertising_added(sk, hdev, cp->instance);
9087 adv_instance->pending = false;
9088 }
9089 rp.instance = cp->instance;
9090 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9091 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9092 goto unlock;
9093 }
9094
9095 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9096 data_len);
9097 if (!cmd) {
9098 err = -ENOMEM;
9099 goto clear_new_instance;
9100 }
9101
9102 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9103 add_ext_adv_data_complete);
9104 if (err < 0) {
9105 mgmt_pending_free(cmd);
9106 goto clear_new_instance;
9107 }
9108
9109 /* We were successful in updating data, so trigger advertising_added
9110 * event if this is an instance that wasn't previously advertising. If
9111 * a failure occurs in the requests we initiated, we will remove the
9112 * instance again in add_advertising_complete
9113 */
9114 if (adv_instance->pending)
9115 mgmt_advertising_added(sk, hdev, cp->instance);
9116
9117 goto unlock;
9118
9119 clear_new_instance:
9120 hci_remove_adv_instance(hdev, cp->instance);
9121
9122 unlock:
9123 hci_dev_unlock(hdev);
9124
9125 return err;
9126 }
9127
9128 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9129 int err)
9130 {
9131 struct mgmt_pending_cmd *cmd = data;
9132 struct mgmt_cp_remove_advertising *cp = cmd->param;
9133 struct mgmt_rp_remove_advertising rp;
9134
9135 bt_dev_dbg(hdev, "err %d", err);
9136
9137 memset(&rp, 0, sizeof(rp));
9138 rp.instance = cp->instance;
9139
9140 if (err)
9141 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9142 mgmt_status(err));
9143 else
9144 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9145 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9146
9147 mgmt_pending_free(cmd);
9148 }
9149
9150 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9151 {
9152 struct mgmt_pending_cmd *cmd = data;
9153 struct mgmt_cp_remove_advertising *cp = cmd->param;
9154 int err;
9155
9156 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9157 if (err)
9158 return err;
9159
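	/* If that was the last instance, advertising is no longer needed
	 * and can be disabled altogether.
	 */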
9160 if (list_empty(&hdev->adv_instances))
9161 err = hci_disable_advertising_sync(hdev);
9162
9163 return err;
9164 }
9165
9166 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9167 void *data, u16 data_len)
9168 {
9169 struct mgmt_cp_remove_advertising *cp = data;
9170 struct mgmt_pending_cmd *cmd;
9171 int err;
9172
9173 bt_dev_dbg(hdev, "sock %p", sk);
9174
9175 hci_dev_lock(hdev);
9176
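	/* Instance 0 requests removal of all instances; any other value
	 * must name an existing instance.
	 */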
9177 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9178 err = mgmt_cmd_status(sk, hdev->id,
9179 MGMT_OP_REMOVE_ADVERTISING,
9180 MGMT_STATUS_INVALID_PARAMS);
9181 goto unlock;
9182 }
9183
9184 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9185 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9186 MGMT_STATUS_BUSY);
9187 goto unlock;
9188 }
9189
9190 if (list_empty(&hdev->adv_instances)) {
9191 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9192 MGMT_STATUS_INVALID_PARAMS);
9193 goto unlock;
9194 }
9195
9196 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9197 data_len);
9198 if (!cmd) {
9199 err = -ENOMEM;
9200 goto unlock;
9201 }
9202
9203 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9204 remove_advertising_complete);
9205 if (err < 0)
9206 mgmt_pending_free(cmd);
9207
9208 unlock:
9209 hci_dev_unlock(hdev);
9210
9211 return err;
9212 }
9213
9214 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9215 void *data, u16 data_len)
9216 {
9217 struct mgmt_cp_get_adv_size_info *cp = data;
9218 struct mgmt_rp_get_adv_size_info rp;
9219 u32 flags, supported_flags;
9220
9221 bt_dev_dbg(hdev, "sock %p", sk);
9222
9223 if (!lmp_le_capable(hdev))
9224 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9225 MGMT_STATUS_REJECTED);
9226
9227 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9228 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9229 MGMT_STATUS_INVALID_PARAMS);
9230
9231 flags = __le32_to_cpu(cp->flags);
9232
9233 /* The current implementation only supports a subset of the specified
9234 * flags.
9235 */
9236 supported_flags = get_supported_adv_flags(hdev);
9237 if (flags & ~supported_flags)
9238 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9239 MGMT_STATUS_INVALID_PARAMS);
9240
9241 rp.instance = cp->instance;
9242 rp.flags = cp->flags;
9243 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9244 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9245
9246 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9247 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9248 }
9249
9250 static const struct hci_mgmt_handler mgmt_handlers[] = {
9251 { NULL }, /* 0x0000 (no command) */
9252 { read_version, MGMT_READ_VERSION_SIZE,
9253 HCI_MGMT_NO_HDEV |
9254 HCI_MGMT_UNTRUSTED },
9255 { read_commands, MGMT_READ_COMMANDS_SIZE,
9256 HCI_MGMT_NO_HDEV |
9257 HCI_MGMT_UNTRUSTED },
9258 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9259 HCI_MGMT_NO_HDEV |
9260 HCI_MGMT_UNTRUSTED },
9261 { read_controller_info, MGMT_READ_INFO_SIZE,
9262 HCI_MGMT_UNTRUSTED },
9263 { set_powered, MGMT_SETTING_SIZE },
9264 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9265 { set_connectable, MGMT_SETTING_SIZE },
9266 { set_fast_connectable, MGMT_SETTING_SIZE },
9267 { set_bondable, MGMT_SETTING_SIZE },
9268 { set_link_security, MGMT_SETTING_SIZE },
9269 { set_ssp, MGMT_SETTING_SIZE },
9270 { set_hs, MGMT_SETTING_SIZE },
9271 { set_le, MGMT_SETTING_SIZE },
9272 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9273 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9274 { add_uuid, MGMT_ADD_UUID_SIZE },
9275 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9276 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9277 HCI_MGMT_VAR_LEN },
9278 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9279 HCI_MGMT_VAR_LEN },
9280 { disconnect, MGMT_DISCONNECT_SIZE },
9281 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9282 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9283 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9284 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9285 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9286 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9287 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9288 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9289 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9290 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9291 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9292 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,          MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,     MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};

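/* Notify userspace that a controller index has come into existence.
 * Configured controllers are announced with MGMT_EV_INDEX_ADDED, while
 * unconfigured ones use the unconfigured variant; the extended event is
 * always sent and additionally carries the type (0x00 configured, 0x01
 * unconfigured) and the transport bus.
 */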
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}

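/* Complete any pending MGMT_OP_SET_POWERED commands once the controller
 * has finished powering on. On success the stored LE actions are
 * restarted and passive scanning is re-evaluated before the new
 * settings are broadcast.
 */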
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration, use the
	 * appropriate INVALID_INDEX status. Otherwise use NOT_POWERED.
	 * We cover both scenarios here since later in mgmt_index_removed()
	 * any hci_conn callbacks will have already been triggered,
	 * potentially causing misleading DISCONNECTED status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

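/* Forward a newly created BR/EDR link key to userspace. The store_hint
 * tells the daemon whether the key should be kept in persistent storage
 * (bonding) or used only for the lifetime of the current connection.
 */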
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

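/* Map an SMP long term key to the mgmt key type exposed to userspace,
 * based on the pairing method (legacy vs P-256) and on whether the
 * pairing was authenticated (MITM protected).
 */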
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * long term keys stored, since their addresses will have changed
	 * by the next time around.
	 *
	 * Only store the long term key when the remote device provides
	 * an identity address. If the remote identity is known, the long
	 * term keys are internally mapped to the identity address, so
	 * static random and public addresses are allowed here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * signature resolving keys stored, since their addresses will
	 * have changed by the next time around.
	 *
	 * Only store the signature resolving key when the remote device
	 * provides an identity address, so static random and public
	 * addresses are allowed here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

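/* Announce connection parameters suggested by a remote LE device so
 * userspace can decide whether to store them. Parameters are only
 * reported for identity addresses, since anything else cannot be
 * re-associated with the device later on.
 */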
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

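/* Emit MGMT_EV_DEVICE_CONNECTED exactly once per connection. The event
 * carries either the LE advertising data seen before the connection or,
 * for BR/EDR, the remote name and class of device encoded as EIR
 * fields.
 */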
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate a buffer sized for either the LE advertising data or
	 * the BR/EDR name and class of device.
	 */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) +
				     (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE,
						    name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class,
						    sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

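/* Return true if a Set Powered (off) command is currently pending,
 * i.e. the controller is being powered down on behalf of a mgmt client
 * rather than by some other means.
 */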
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

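/* User confirmation and passkey requests ask an agent in userspace to
 * complete SSP/SMP authentication; the helpers below emit the request
 * events and route the eventual replies back to the pending mgmt
 * commands.
 */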
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering on the
		 * HCI dev, don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

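/* EIR and advertising data are a sequence of length-prefixed fields:
 * { len, type, data[len - 1] }, where len counts the type octet. As an
 * illustrative example, the bytes
 *
 *	0x03 0x03 0x0d 0x18
 *
 * form a complete list of 16-bit service UUIDs (type 0x03) holding the
 * single little-endian UUID 0x180d, which the parser below widens to a
 * 128-bit UUID using the Bluetooth base UUID before comparing against
 * the filter list.
 */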
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

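/* Queue a delayed LE scan restart unless the current discovery window
 * would already have expired by the time the restart takes effect.
 * Callers use this to refresh results when the controller filters out
 * duplicate advertising reports.
 */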
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If the controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * an RSSI smaller than the RSSI threshold will be dropped. If the
	 * quirk is set, let it through for further processing, as we might
	 * need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in the filter, results
		 * with no matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated results with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitors
	 *
	 * For cases 1 and 2, report all advertisements via
	 * MGMT_EV_DEVICE_FOUND and report ONLY one advertisement per device
	 * for the matched Monitor via MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 *
	 * For case 3, since we are not active scanning and all
	 * advertisements received are due to a matched Advertisement
	 * Monitor, report all advertisements ONLY via
	 * MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

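/* Filter an LE advertising report against the AD types the mesh daemon
 * registered interest in and, on a match (or when no filter is set),
 * forward it as MGMT_EV_MESH_DEVICE_FOUND together with the receive
 * instant and flags.
 */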
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

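/* Main entry point for inquiry results and advertising reports.
 * Depending on the discovery state, the result is forwarded as a
 * regular DEVICE_FOUND event, handed to the advertisement monitor code,
 * and/or duplicated to the mesh handler above.
 */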
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel-initiated discovery. With
	 * LE the one exception is if we have pend_le_reports > 0, in
	 * which case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					    name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

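/* The handlers above are reached through the HCI control channel. As a
 * rough userspace sketch (not part of this file; error handling
 * omitted), a mgmt client binds a raw HCI socket to the control channel
 * and then exchanges the command and event structures defined in
 * <net/bluetooth/mgmt.h>:
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_CONTROL,
 *	};
 *	int sk = socket(PF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	bind(sk, (struct sockaddr *)&addr, sizeof(addr));
 */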
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}