/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

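/* MGMT_VERSION and MGMT_REVISION are reported back to userspace via the
 * Read Management Version Information command (mgmt_fill_version_info()
 * below); the revision is bumped whenever the interface gains new
 * commands or events.
 */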
#define MGMT_VERSION	1
#define MGMT_REVISION	22

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
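
/* Untrusted (non-privileged) sockets are restricted to the read-only
 * commands above and only receive the informational events in this
 * list; everything else requires HCI_SOCK_TRUSTED (see read_commands()
 * below for how the two sets are reported).
 */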

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
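/* The table is indexed directly by the HCI status code, so the entries
 * must stay in Core Specification order. Codes beyond the end of the
 * table fall back to MGMT_STATUS_FAILED in mgmt_status() below.
 */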
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}
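
/* For example, mgmt_status(-EBUSY) and mgmt_status(0x0c) (HCI Command
 * Disallowed) both resolve to MGMT_STATUS_BUSY: negative arguments are
 * treated as errno values, non-negative ones as HCI status codes.
 */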

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

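/* Translate an address type from the MGMT API (BDADDR_LE_*) into the
 * HCI core representation (ADDR_LE_DEV_*); anything that is not public
 * is treated as a random address.
 */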
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

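/* Build the Read Management Supported Commands reply: a fixed header
 * carrying the two counts, followed by a packed little-endian list of
 * command opcodes and then event opcodes. Untrusted sockets are only
 * told about the read-only subsets.
 */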
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

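/* The index list replies below are assembled under hci_dev_list_lock,
 * hence the GFP_ATOMIC allocations: a first pass counts the eligible
 * controllers to size the reply and a second pass fills it in, skipping
 * controllers that are still in setup or config, bound to the user
 * channel, or marked raw-only.
 */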
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

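/* Mirror of is_configured(): report the same two conditions as a
 * bitmask of options that are still missing before the controller can
 * leave the unconfigured state.
 */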
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

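/* Build the bitmask of PHYs the controller supports: the BR/EDR bits
 * are derived from the LMP feature bits (multi-slot and EDR support)
 * and the LE bits from the LE feature page (2M and Coded PHY).
 */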
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

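/* Note that for the EDR packet types the HCI_2DHx/HCI_3DHx bits in
 * hdev->pkt_type are "shall not use" bits, so a PHY counts as selected
 * when the corresponding bit is cleared.
 */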
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means that if the static address is not configured, this
	 * flag will never be set. If the address is configured, then
	 * whether the address is actually in use decides if the flag is
	 * set or not.
	 *
	 * For LE-only controllers and for dual-mode controllers with
	 * BR/EDR disabled, the existence of the static address will be
	 * evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

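/* The service cache defers EIR and class of device updates while UUIDs
 * and the device class are being modified; when the cache is flushed
 * (queued elsewhere in this file with CACHE_TIMEOUT) the pending
 * updates go out through the cmd_sync queue below.
 */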
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure the
	 * limited discoverable flag is cleared. Even in the case of a
	 * timeout triggered from general discoverable, it is safe to
	 * unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

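/* Mesh transmissions are serialized: mesh_send_done() runs once the
 * send window ends, disables advertising and completes the head of the
 * TX queue (emitting MGMT_EV_MESH_PACKET_CMPLT unless silent), and its
 * mesh_next() callback kicks off the next queued packet.
 */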
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);

static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set implicitly
	 * so that pairing works for them. For mgmt, however, we
	 * require user-space to explicitly enable it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where it might not have
		 * been "really" powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_settings() for power on, as power off
		 * is deferred to the hdev->power_off work, which does
		 * call hci_dev_do_close().
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

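/* Used when a single HCI operation completes several pending mgmt
 * commands: sk records the first socket that was responded to (so the
 * caller can drop the held reference afterwards) and mgmt_status
 * carries the result forwarded to each command.
 */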
1424 struct cmd_lookup {
1425 struct sock *sk;
1426 struct hci_dev *hdev;
1427 u8 mgmt_status;
1428 };
1429
settings_rsp(struct mgmt_pending_cmd * cmd,void * data)1430 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1431 {
1432 struct cmd_lookup *match = data;
1433
1434 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1435
1436 list_del(&cmd->list);
1437
1438 if (match->sk == NULL) {
1439 match->sk = cmd->sk;
1440 sock_hold(match->sk);
1441 }
1442
1443 mgmt_pending_free(cmd);
1444 }
1445
cmd_status_rsp(struct mgmt_pending_cmd * cmd,void * data)1446 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1447 {
1448 u8 *status = data;
1449
1450 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1451 mgmt_pending_remove(cmd);
1452 }
1453
cmd_complete_rsp(struct mgmt_pending_cmd * cmd,void * data)1454 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1455 {
1456 struct cmd_lookup *match = data;
1457
1458 /* dequeue cmd_sync entries using cmd as data as that is about to be
1459 * removed/freed.
1460 */
1461 hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);
1462
1463 if (cmd->cmd_complete) {
1464 cmd->cmd_complete(cmd, match->mgmt_status);
1465 mgmt_pending_remove(cmd);
1466
1467 return;
1468 }
1469
1470 cmd_status_rsp(cmd, data);
1471 }
1472
generic_cmd_complete(struct mgmt_pending_cmd * cmd,u8 status)1473 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1474 {
1475 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1476 cmd->param, cmd->param_len);
1477 }
1478
addr_cmd_complete(struct mgmt_pending_cmd * cmd,u8 status)1479 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1480 {
1481 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1482 cmd->param, sizeof(struct mgmt_addr_info));
1483 }
1484
mgmt_bredr_support(struct hci_dev * hdev)1485 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1486 {
1487 if (!lmp_bredr_capable(hdev))
1488 return MGMT_STATUS_NOT_SUPPORTED;
1489 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1490 return MGMT_STATUS_REJECTED;
1491 else
1492 return MGMT_STATUS_SUCCESS;
1493 }
1494
mgmt_le_support(struct hci_dev * hdev)1495 static u8 mgmt_le_support(struct hci_dev *hdev)
1496 {
1497 if (!lmp_le_capable(hdev))
1498 return MGMT_STATUS_NOT_SUPPORTED;
1499 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1500 return MGMT_STATUS_REJECTED;
1501 else
1502 return MGMT_STATUS_SUCCESS;
1503 }
1504
mgmt_set_discoverable_complete(struct hci_dev * hdev,void * data,int err)1505 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1506 int err)
1507 {
1508 struct mgmt_pending_cmd *cmd = data;
1509
1510 bt_dev_dbg(hdev, "err %d", err);
1511
1512 /* Make sure cmd still outstanding. */
1513 if (err == -ECANCELED ||
1514 cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1515 return;
1516
1517 hci_dev_lock(hdev);
1518
1519 if (err) {
1520 u8 mgmt_err = mgmt_status(err);
1521 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1522 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1523 goto done;
1524 }
1525
1526 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1527 hdev->discov_timeout > 0) {
1528 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1529 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1530 }
1531
1532 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1533 new_settings(hdev, cmd->sk);
1534
1535 done:
1536 mgmt_pending_remove(cmd);
1537 hci_dev_unlock(hdev);
1538 }
1539
set_discoverable_sync(struct hci_dev * hdev,void * data)1540 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1541 {
1542 BT_DBG("%s", hdev->name);
1543
1544 return hci_update_discoverable_sync(hdev);
1545 }
1546
set_discoverable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1547 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1548 u16 len)
1549 {
1550 struct mgmt_cp_set_discoverable *cp = data;
1551 struct mgmt_pending_cmd *cmd;
1552 u16 timeout;
1553 int err;
1554
1555 bt_dev_dbg(hdev, "sock %p", sk);
1556
1557 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1558 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1559 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1560 MGMT_STATUS_REJECTED);
1561
1562 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1563 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1564 MGMT_STATUS_INVALID_PARAMS);
1565
1566 timeout = __le16_to_cpu(cp->timeout);
1567
1568 /* Disabling discoverable requires that no timeout is set,
1569 * and enabling limited discoverable requires a timeout.
1570 */
1571 if ((cp->val == 0x00 && timeout > 0) ||
1572 (cp->val == 0x02 && timeout == 0))
1573 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1574 MGMT_STATUS_INVALID_PARAMS);
1575
1576 hci_dev_lock(hdev);
1577
1578 if (!hdev_is_powered(hdev) && timeout > 0) {
1579 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1580 MGMT_STATUS_NOT_POWERED);
1581 goto failed;
1582 }
1583
1584 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1585 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1586 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1587 MGMT_STATUS_BUSY);
1588 goto failed;
1589 }
1590
1591 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1592 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1593 MGMT_STATUS_REJECTED);
1594 goto failed;
1595 }
1596
1597 if (hdev->advertising_paused) {
1598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1599 MGMT_STATUS_BUSY);
1600 goto failed;
1601 }
1602
1603 if (!hdev_is_powered(hdev)) {
1604 bool changed = false;
1605
1606 /* Setting limited discoverable when powered off is
1607 * not a valid operation since it requires a timeout
1608 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1609 */
1610 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1611 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1612 changed = true;
1613 }
1614
1615 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1616 if (err < 0)
1617 goto failed;
1618
1619 if (changed)
1620 err = new_settings(hdev, sk);
1621
1622 goto failed;
1623 }
1624
1625 /* If the current mode is the same, then just update the timeout
1626 * value with the new value. And if only the timeout gets updated,
1627 * then no need for any HCI transactions.
1628 */
1629 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1630 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1631 HCI_LIMITED_DISCOVERABLE)) {
1632 cancel_delayed_work(&hdev->discov_off);
1633 hdev->discov_timeout = timeout;
1634
1635 if (cp->val && hdev->discov_timeout > 0) {
1636 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1637 queue_delayed_work(hdev->req_workqueue,
1638 &hdev->discov_off, to);
1639 }
1640
1641 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1642 goto failed;
1643 }
1644
1645 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1646 if (!cmd) {
1647 err = -ENOMEM;
1648 goto failed;
1649 }
1650
1651 /* Cancel any potential discoverable timeout that might be
1652 * still active and store new timeout value. The arming of
1653 * the timeout happens in the complete handler.
1654 */
1655 cancel_delayed_work(&hdev->discov_off);
1656 hdev->discov_timeout = timeout;
1657
1658 if (cp->val)
1659 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1660 else
1661 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1662
1663 /* Limited discoverable mode */
1664 if (cp->val == 0x02)
1665 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1666 else
1667 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1668
1669 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1670 mgmt_set_discoverable_complete);
1671
1672 if (err < 0)
1673 mgmt_pending_remove(cmd);
1674
1675 failed:
1676 hci_dev_unlock(hdev);
1677 return err;
1678 }
1679
mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1680 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1681 int err)
1682 {
1683 struct mgmt_pending_cmd *cmd = data;
1684
1685 bt_dev_dbg(hdev, "err %d", err);
1686
1687 /* Make sure cmd still outstanding. */
1688 if (err == -ECANCELED ||
1689 cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1690 return;
1691
1692 hci_dev_lock(hdev);
1693
1694 if (err) {
1695 u8 mgmt_err = mgmt_status(err);
1696 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1697 goto done;
1698 }
1699
1700 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1701 new_settings(hdev, cmd->sk);
1702
1703 done:
1704 if (cmd)
1705 mgmt_pending_remove(cmd);
1706
1707 hci_dev_unlock(hdev);
1708 }
1709
set_connectable_update_settings(struct hci_dev * hdev,struct sock * sk,u8 val)1710 static int set_connectable_update_settings(struct hci_dev *hdev,
1711 struct sock *sk, u8 val)
1712 {
1713 bool changed = false;
1714 int err;
1715
1716 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1717 changed = true;
1718
1719 if (val) {
1720 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1721 } else {
1722 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1723 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1724 }
1725
1726 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1727 if (err < 0)
1728 return err;
1729
1730 if (changed) {
1731 hci_update_scan(hdev);
1732 hci_update_passive_scan(hdev);
1733 return new_settings(hdev, sk);
1734 }
1735
1736 return 0;
1737 }
1738
set_connectable_sync(struct hci_dev * hdev,void * data)1739 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1740 {
1741 BT_DBG("%s", hdev->name);
1742
1743 return hci_update_connectable_sync(hdev);
1744 }
1745
set_connectable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1746 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1747 u16 len)
1748 {
1749 struct mgmt_mode *cp = data;
1750 struct mgmt_pending_cmd *cmd;
1751 int err;
1752
1753 bt_dev_dbg(hdev, "sock %p", sk);
1754
1755 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1756 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1757 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1758 MGMT_STATUS_REJECTED);
1759
1760 if (cp->val != 0x00 && cp->val != 0x01)
1761 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1762 MGMT_STATUS_INVALID_PARAMS);
1763
1764 hci_dev_lock(hdev);
1765
1766 if (!hdev_is_powered(hdev)) {
1767 err = set_connectable_update_settings(hdev, sk, cp->val);
1768 goto failed;
1769 }
1770
1771 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1772 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1773 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1774 MGMT_STATUS_BUSY);
1775 goto failed;
1776 }
1777
1778 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1779 if (!cmd) {
1780 err = -ENOMEM;
1781 goto failed;
1782 }
1783
1784 if (cp->val) {
1785 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1786 } else {
1787 if (hdev->discov_timeout > 0)
1788 cancel_delayed_work(&hdev->discov_off);
1789
1790 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1791 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1792 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1793 }
1794
1795 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1796 mgmt_set_connectable_complete);
1797
1798 if (err < 0)
1799 mgmt_pending_remove(cmd);
1800
1801 failed:
1802 hci_dev_unlock(hdev);
1803 return err;
1804 }
1805
set_bondable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1806 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1807 u16 len)
1808 {
1809 struct mgmt_mode *cp = data;
1810 bool changed;
1811 int err;
1812
1813 bt_dev_dbg(hdev, "sock %p", sk);
1814
1815 if (cp->val != 0x00 && cp->val != 0x01)
1816 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1817 MGMT_STATUS_INVALID_PARAMS);
1818
1819 hci_dev_lock(hdev);
1820
1821 if (cp->val)
1822 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1823 else
1824 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1825
1826 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1827 if (err < 0)
1828 goto unlock;
1829
1830 if (changed) {
1831 /* In limited privacy mode the change of bondable mode
1832 * may affect the local advertising address.
1833 */
1834 hci_update_discoverable(hdev);
1835
1836 err = new_settings(hdev, sk);
1837 }
1838
1839 unlock:
1840 hci_dev_unlock(hdev);
1841 return err;
1842 }
1843
1844 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1845 u16 len)
1846 {
1847 struct mgmt_mode *cp = data;
1848 struct mgmt_pending_cmd *cmd;
1849 u8 val, status;
1850 int err;
1851
1852 bt_dev_dbg(hdev, "sock %p", sk);
1853
1854 status = mgmt_bredr_support(hdev);
1855 if (status)
1856 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1857 status);
1858
1859 if (cp->val != 0x00 && cp->val != 0x01)
1860 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1861 MGMT_STATUS_INVALID_PARAMS);
1862
1863 hci_dev_lock(hdev);
1864
1865 if (!hdev_is_powered(hdev)) {
1866 bool changed = false;
1867
1868 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1869 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1870 changed = true;
1871 }
1872
1873 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1874 if (err < 0)
1875 goto failed;
1876
1877 if (changed)
1878 err = new_settings(hdev, sk);
1879
1880 goto failed;
1881 }
1882
1883 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1884 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1885 MGMT_STATUS_BUSY);
1886 goto failed;
1887 }
1888
1889 val = !!cp->val;
1890
1891 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1892 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1893 goto failed;
1894 }
1895
1896 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1897 if (!cmd) {
1898 err = -ENOMEM;
1899 goto failed;
1900 }
1901
1902 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1903 if (err < 0) {
1904 mgmt_pending_remove(cmd);
1905 goto failed;
1906 }
1907
1908 failed:
1909 hci_dev_unlock(hdev);
1910 return err;
1911 }
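
/* Note: unlike most handlers in this file, set_link_security() still sends
 * HCI_OP_WRITE_AUTH_ENABLE directly via hci_send_cmd() instead of going
 * through the hci_cmd_sync machinery; the pending command added above is
 * completed later from the HCI event path once the controller confirms
 * the new auth setting.
 */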
1912
1913 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1914 {
1915 struct cmd_lookup match = { NULL, hdev };
1916 struct mgmt_pending_cmd *cmd = data;
1917 struct mgmt_mode *cp = cmd->param;
1918 u8 enable = cp->val;
1919 bool changed;
1920
1921 /* Make sure cmd still outstanding. */
1922 if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1923 return;
1924
1925 if (err) {
1926 u8 mgmt_err = mgmt_status(err);
1927
1928 if (enable && hci_dev_test_and_clear_flag(hdev,
1929 HCI_SSP_ENABLED)) {
1930 new_settings(hdev, NULL);
1931 }
1932
1933 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1934 &mgmt_err);
1935 return;
1936 }
1937
1938 if (enable) {
1939 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1940 } else {
1941 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1942 }
1943
1944 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1945
1946 if (changed)
1947 new_settings(hdev, match.sk);
1948
1949 if (match.sk)
1950 sock_put(match.sk);
1951
1952 hci_update_eir_sync(hdev);
1953 }
1954
1955 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1956 {
1957 struct mgmt_pending_cmd *cmd = data;
1958 struct mgmt_mode *cp = cmd->param;
1959 bool changed = false;
1960 int err;
1961
1962 if (cp->val)
1963 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1964
1965 err = hci_write_ssp_mode_sync(hdev, cp->val);
1966
1967 if (!err && changed)
1968 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1969
1970 return err;
1971 }
1972
1973 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1974 {
1975 struct mgmt_mode *cp = data;
1976 struct mgmt_pending_cmd *cmd;
1977 u8 status;
1978 int err;
1979
1980 bt_dev_dbg(hdev, "sock %p", sk);
1981
1982 status = mgmt_bredr_support(hdev);
1983 if (status)
1984 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1985
1986 if (!lmp_ssp_capable(hdev))
1987 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1988 MGMT_STATUS_NOT_SUPPORTED);
1989
1990 if (cp->val != 0x00 && cp->val != 0x01)
1991 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1992 MGMT_STATUS_INVALID_PARAMS);
1993
1994 hci_dev_lock(hdev);
1995
1996 if (!hdev_is_powered(hdev)) {
1997 bool changed;
1998
1999 if (cp->val) {
2000 changed = !hci_dev_test_and_set_flag(hdev,
2001 HCI_SSP_ENABLED);
2002 } else {
2003 changed = hci_dev_test_and_clear_flag(hdev,
2004 HCI_SSP_ENABLED);
2005 }
2006
2007 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2008 if (err < 0)
2009 goto failed;
2010
2011 if (changed)
2012 err = new_settings(hdev, sk);
2013
2014 goto failed;
2015 }
2016
2017 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2018 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2019 MGMT_STATUS_BUSY);
2020 goto failed;
2021 }
2022
2023 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2024 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2025 goto failed;
2026 }
2027
2028 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2029 if (!cmd)
2030 err = -ENOMEM;
2031 else
2032 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2033 set_ssp_complete);
2034
2035 if (err < 0) {
2036 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2037 MGMT_STATUS_FAILED);
2038
2039 if (cmd)
2040 mgmt_pending_remove(cmd);
2041 }
2042
2043 failed:
2044 hci_dev_unlock(hdev);
2045 return err;
2046 }
2047
2048 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2049 {
2050 bt_dev_dbg(hdev, "sock %p", sk);
2051
2052 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2053 MGMT_STATUS_NOT_SUPPORTED);
2054 }
2055
2056 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2057 {
2058 struct cmd_lookup match = { NULL, hdev };
2059 u8 status = mgmt_status(err);
2060
2061 bt_dev_dbg(hdev, "err %d", err);
2062
2063 if (status) {
2064 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2065 &status);
2066 return;
2067 }
2068
2069 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2070
2071 new_settings(hdev, match.sk);
2072
2073 if (match.sk)
2074 sock_put(match.sk);
2075 }
2076
2077 static int set_le_sync(struct hci_dev *hdev, void *data)
2078 {
2079 struct mgmt_pending_cmd *cmd = data;
2080 struct mgmt_mode *cp = cmd->param;
2081 u8 val = !!cp->val;
2082 int err;
2083
2084 if (!val) {
2085 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2086
2087 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2088 hci_disable_advertising_sync(hdev);
2089
2090 if (ext_adv_capable(hdev))
2091 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2092 } else {
2093 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2094 }
2095
2096 err = hci_write_le_host_supported_sync(hdev, val, 0);
2097
2098 /* Make sure the controller has a good default for
2099 * advertising data. Restrict the update to when LE
2100 * has actually been enabled. During power on, the
2101 * update in powered_update_hci will take care of it.
2102 */
2103 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2104 if (ext_adv_capable(hdev)) {
2105 int status;
2106
2107 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2108 if (!status)
2109 hci_update_scan_rsp_data_sync(hdev, 0x00);
2110 } else {
2111 hci_update_adv_data_sync(hdev, 0x00);
2112 hci_update_scan_rsp_data_sync(hdev, 0x00);
2113 }
2114
2115 hci_update_passive_scan(hdev);
2116 }
2117
2118 return err;
2119 }
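
/* Instance 0x00 in set_le_sync() refers to the default advertising
 * instance used for legacy-style advertising data, as opposed to the
 * numbered instances created via MGMT_OP_ADD_ADVERTISING.
 */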
2120
2121 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2122 {
2123 struct mgmt_pending_cmd *cmd = data;
2124 u8 status = mgmt_status(err);
2125 struct sock *sk = cmd->sk;
2126
2127 if (status) {
2128 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2129 cmd_status_rsp, &status);
2130 return;
2131 }
2132
2133 mgmt_pending_remove(cmd);
2134 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2135 }
2136
2137 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2138 {
2139 struct mgmt_pending_cmd *cmd = data;
2140 struct mgmt_cp_set_mesh *cp = cmd->param;
2141 size_t len = cmd->param_len;
2142
2143 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2144
2145 if (cp->enable)
2146 hci_dev_set_flag(hdev, HCI_MESH);
2147 else
2148 hci_dev_clear_flag(hdev, HCI_MESH);
2149
2150 len -= sizeof(*cp);
2151
2152 /* If filters don't fit, forward all adv pkts */
2153 if (len <= sizeof(hdev->mesh_ad_types))
2154 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2155
2156 hci_update_passive_scan_sync(hdev);
2157 return 0;
2158 }
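
/* A sketch of the command layout consumed by set_mesh_sync(), as described
 * by mgmt-api.txt (the authoritative definition is struct mgmt_cp_set_mesh
 * in mgmt.h; verify the fields there):
 *
 *	struct mgmt_cp_set_mesh {
 *		__u8   enable;		// 0x00/0x01, validated in set_mesh()
 *		__le16 window;		// passive scan window
 *		__le16 period;		// passive scan period
 *		__u8   num_ad_types;	// number of AD type filters that follow
 *		__u8   ad_types[];	// copied into hdev->mesh_ad_types above
 *	} __packed;
 *
 * len - sizeof(*cp) is therefore the count of trailing AD-type bytes.
 */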
2159
2160 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2161 {
2162 struct mgmt_cp_set_mesh *cp = data;
2163 struct mgmt_pending_cmd *cmd;
2164 int err = 0;
2165
2166 bt_dev_dbg(hdev, "sock %p", sk);
2167
2168 if (!lmp_le_capable(hdev) ||
2169 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2170 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2171 MGMT_STATUS_NOT_SUPPORTED);
2172
2173 if (cp->enable != 0x00 && cp->enable != 0x01)
2174 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2175 MGMT_STATUS_INVALID_PARAMS);
2176
2177 hci_dev_lock(hdev);
2178
2179 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2180 if (!cmd)
2181 err = -ENOMEM;
2182 else
2183 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2184 set_mesh_complete);
2185
2186 if (err < 0) {
2187 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2188 MGMT_STATUS_FAILED);
2189
2190 if (cmd)
2191 mgmt_pending_remove(cmd);
2192 }
2193
2194 hci_dev_unlock(hdev);
2195 return err;
2196 }
2197
2198 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2199 {
2200 struct mgmt_mesh_tx *mesh_tx = data;
2201 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2202 unsigned long mesh_send_interval;
2203 u8 mgmt_err = mgmt_status(err);
2204
2205 /* Report any errors here, but don't report completion */
2206
2207 if (mgmt_err) {
2208 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2209 /* Send Complete Error Code for handle */
2210 mesh_send_complete(hdev, mesh_tx, false);
2211 return;
2212 }
2213
2214 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2215 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2216 mesh_send_interval);
2217 }
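
/* Worked example of the pacing above: with send->cnt == 4, mesh_send_done
 * is scheduled msecs_to_jiffies(4 * 25) == 100 ms out, i.e. roughly 25 ms
 * of air time is budgeted per requested transmission.
 */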
2218
2219 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2220 {
2221 struct mgmt_mesh_tx *mesh_tx = data;
2222 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2223 struct adv_info *adv, *next_instance;
2224 u8 instance = hdev->le_num_of_adv_sets + 1;
2225 u16 timeout, duration;
2226 int err = 0;
2227
2228 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2229 return MGMT_STATUS_BUSY;
2230
2231 timeout = 1000;
2232 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2233 adv = hci_add_adv_instance(hdev, instance, 0,
2234 send->adv_data_len, send->adv_data,
2235 0, NULL,
2236 timeout, duration,
2237 HCI_ADV_TX_POWER_NO_PREFERENCE,
2238 hdev->le_adv_min_interval,
2239 hdev->le_adv_max_interval,
2240 mesh_tx->handle);
2241
2242 if (!IS_ERR(adv))
2243 mesh_tx->instance = instance;
2244 else
2245 err = PTR_ERR(adv);
2246
2247 if (hdev->cur_adv_instance == instance) {
2248 /* If the currently advertised instance is being changed then
2249 * cancel the current advertising and schedule the next
2250 * instance. If there is only one instance then the overridden
2251 * advertising data will be visible right away.
2252 */
2253 cancel_adv_timeout(hdev);
2254
2255 next_instance = hci_get_next_instance(hdev, instance);
2256 if (next_instance)
2257 instance = next_instance->instance;
2258 else
2259 instance = 0;
2260 } else if (hdev->adv_instance_timeout) {
2261 /* Immediately advertise the new instance if no other is active, or
2262 * let it start naturally from the queue if advertising is already running
2263 */
2264 instance = 0;
2265 }
2266
2267 if (instance)
2268 return hci_schedule_adv_instance_sync(hdev, instance, true);
2269
2270 return err;
2271 }
2272
2273 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2274 {
2275 struct mgmt_rp_mesh_read_features *rp = data;
2276
2277 if (rp->used_handles >= rp->max_handles)
2278 return;
2279
2280 rp->handles[rp->used_handles++] = mesh_tx->handle;
2281 }
2282
2283 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2284 void *data, u16 len)
2285 {
2286 struct mgmt_rp_mesh_read_features rp;
2287
2288 if (!lmp_le_capable(hdev) ||
2289 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2290 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2291 MGMT_STATUS_NOT_SUPPORTED);
2292
2293 memset(&rp, 0, sizeof(rp));
2294 rp.index = cpu_to_le16(hdev->id);
2295 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2296 rp.max_handles = MESH_HANDLES_MAX;
2297
2298 hci_dev_lock(hdev);
2299
2300 if (rp.max_handles)
2301 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2302
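/* Reply length arithmetic: sizeof(rp) already includes room for all
 * MESH_HANDLES_MAX handle bytes, so subtracting MESH_HANDLES_MAX and
 * adding rp.used_handles trims the reply to the fixed header plus only
 * the handles actually filled in by send_count().
 */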
2303 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2304 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2305
2306 hci_dev_unlock(hdev);
2307 return 0;
2308 }
2309
2310 static int send_cancel(struct hci_dev *hdev, void *data)
2311 {
2312 struct mgmt_pending_cmd *cmd = data;
2313 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2314 struct mgmt_mesh_tx *mesh_tx;
2315
2316 if (!cancel->handle) {
2317 do {
2318 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2319
2320 if (mesh_tx)
2321 mesh_send_complete(hdev, mesh_tx, false);
2322 } while (mesh_tx);
2323 } else {
2324 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2325
2326 if (mesh_tx && mesh_tx->sk == cmd->sk)
2327 mesh_send_complete(hdev, mesh_tx, false);
2328 }
2329
2330 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2331 0, NULL, 0);
2332 mgmt_pending_free(cmd);
2333
2334 return 0;
2335 }
2336
2337 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2338 void *data, u16 len)
2339 {
2340 struct mgmt_pending_cmd *cmd;
2341 int err;
2342
2343 if (!lmp_le_capable(hdev) ||
2344 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2345 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2346 MGMT_STATUS_NOT_SUPPORTED);
2347
2348 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2349 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2350 MGMT_STATUS_REJECTED);
2351
2352 hci_dev_lock(hdev);
2353 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2354 if (!cmd)
2355 err = -ENOMEM;
2356 else
2357 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2358
2359 if (err < 0) {
2360 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2361 MGMT_STATUS_FAILED);
2362
2363 if (cmd)
2364 mgmt_pending_free(cmd);
2365 }
2366
2367 hci_dev_unlock(hdev);
2368 return err;
2369 }
2370
2371 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2372 {
2373 struct mgmt_mesh_tx *mesh_tx;
2374 struct mgmt_cp_mesh_send *send = data;
2375 struct mgmt_rp_mesh_read_features rp;
2376 bool sending;
2377 int err = 0;
2378
2379 if (!lmp_le_capable(hdev) ||
2380 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2381 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2382 MGMT_STATUS_NOT_SUPPORTED);
2383 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2384 len <= MGMT_MESH_SEND_SIZE ||
2385 len > (MGMT_MESH_SEND_SIZE + 31))
2386 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2387 MGMT_STATUS_REJECTED);
2388
2389 hci_dev_lock(hdev);
2390
2391 memset(&rp, 0, sizeof(rp));
2392 rp.max_handles = MESH_HANDLES_MAX;
2393
2394 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2395
2396 if (rp.max_handles <= rp.used_handles) {
2397 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2398 MGMT_STATUS_BUSY);
2399 goto done;
2400 }
2401
2402 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2403 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2404
2405 if (!mesh_tx)
2406 err = -ENOMEM;
2407 else if (!sending)
2408 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2409 mesh_send_start_complete);
2410
2411 if (err < 0) {
2412 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2413 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2414 MGMT_STATUS_FAILED);
2415
2416 if (mesh_tx) {
2417 if (sending)
2418 mgmt_mesh_remove(mesh_tx);
2419 }
2420 } else {
2421 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2422
2423 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2424 &mesh_tx->handle, 1);
2425 }
2426
2427 done:
2428 hci_dev_unlock(hdev);
2429 return err;
2430 }
2431
2432 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2433 {
2434 struct mgmt_mode *cp = data;
2435 struct mgmt_pending_cmd *cmd;
2436 int err;
2437 u8 val, enabled;
2438
2439 bt_dev_dbg(hdev, "sock %p", sk);
2440
2441 if (!lmp_le_capable(hdev))
2442 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2443 MGMT_STATUS_NOT_SUPPORTED);
2444
2445 if (cp->val != 0x00 && cp->val != 0x01)
2446 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2447 MGMT_STATUS_INVALID_PARAMS);
2448
2449 /* Bluetooth single-mode LE-only controllers, or dual-mode
2450 * controllers configured as LE-only devices, do not allow
2451 * switching LE off. These either have LE enabled explicitly
2452 * or have had BR/EDR switched off previously.
2453 *
2454 * When trying to enable LE when it is already enabled, gracefully
2455 * send a positive response. Trying to disable it, however, will
2456 * result in rejection.
2457 */
2458 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2459 if (cp->val == 0x01)
2460 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2461
2462 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2463 MGMT_STATUS_REJECTED);
2464 }
2465
2466 hci_dev_lock(hdev);
2467
2468 val = !!cp->val;
2469 enabled = lmp_host_le_capable(hdev);
2470
2471 if (!hdev_is_powered(hdev) || val == enabled) {
2472 bool changed = false;
2473
2474 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2475 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2476 changed = true;
2477 }
2478
2479 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2480 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2481 changed = true;
2482 }
2483
2484 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2485 if (err < 0)
2486 goto unlock;
2487
2488 if (changed)
2489 err = new_settings(hdev, sk);
2490
2491 goto unlock;
2492 }
2493
2494 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2495 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2496 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2497 MGMT_STATUS_BUSY);
2498 goto unlock;
2499 }
2500
2501 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2502 if (!cmd)
2503 err = -ENOMEM;
2504 else
2505 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2506 set_le_complete);
2507
2508 if (err < 0) {
2509 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2510 MGMT_STATUS_FAILED);
2511
2512 if (cmd)
2513 mgmt_pending_remove(cmd);
2514 }
2515
2516 unlock:
2517 hci_dev_unlock(hdev);
2518 return err;
2519 }
2520
2521 /* This is a helper function to test for pending mgmt commands that can
2522 * cause CoD or EIR HCI commands. We can only allow one such pending
2523 * mgmt command at a time since otherwise we cannot easily track what
2524 * the current values are and will be, and based on that calculate
2525 * whether a new HCI command needs to be sent and, if so, with what value.
2526 */
2527 static bool pending_eir_or_class(struct hci_dev *hdev)
2528 {
2529 struct mgmt_pending_cmd *cmd;
2530
2531 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2532 switch (cmd->opcode) {
2533 case MGMT_OP_ADD_UUID:
2534 case MGMT_OP_REMOVE_UUID:
2535 case MGMT_OP_SET_DEV_CLASS:
2536 case MGMT_OP_SET_POWERED:
2537 return true;
2538 }
2539 }
2540
2541 return false;
2542 }
2543
2544 static const u8 bluetooth_base_uuid[] = {
2545 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2546 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2547 };
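
/* The bytes above are the Bluetooth Base UUID
 * 00000000-0000-1000-8000-00805f9b34fb stored in little-endian order,
 * which is how UUIDs arrive over the mgmt interface; only bytes [12..15]
 * differ for 16-bit and 32-bit SIG-assigned UUIDs.
 */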
2548
2549 static u8 get_uuid_size(const u8 *uuid)
2550 {
2551 u32 val;
2552
2553 if (memcmp(uuid, bluetooth_base_uuid, 12))
2554 return 128;
2555
2556 val = get_unaligned_le32(&uuid[12]);
2557 if (val > 0xffff)
2558 return 32;
2559
2560 return 16;
2561 }
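
/* Example: the 16-bit Heart Rate service UUID 0x180d arrives as the full
 * 128-bit value 0000180d-0000-1000-8000-00805f9b34fb. In little-endian
 * order its first 12 bytes match bluetooth_base_uuid, and
 * get_unaligned_le32() on bytes [12..15] yields 0x0000180d <= 0xffff,
 * so get_uuid_size() returns 16.
 */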
2562
2563 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2564 {
2565 struct mgmt_pending_cmd *cmd = data;
2566
2567 bt_dev_dbg(hdev, "err %d", err);
2568
2569 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2570 mgmt_status(err), hdev->dev_class, 3);
2571
2572 mgmt_pending_free(cmd);
2573 }
2574
2575 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2576 {
2577 int err;
2578
2579 err = hci_update_class_sync(hdev);
2580 if (err)
2581 return err;
2582
2583 return hci_update_eir_sync(hdev);
2584 }
2585
2586 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2587 {
2588 struct mgmt_cp_add_uuid *cp = data;
2589 struct mgmt_pending_cmd *cmd;
2590 struct bt_uuid *uuid;
2591 int err;
2592
2593 bt_dev_dbg(hdev, "sock %p", sk);
2594
2595 hci_dev_lock(hdev);
2596
2597 if (pending_eir_or_class(hdev)) {
2598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2599 MGMT_STATUS_BUSY);
2600 goto failed;
2601 }
2602
2603 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2604 if (!uuid) {
2605 err = -ENOMEM;
2606 goto failed;
2607 }
2608
2609 memcpy(uuid->uuid, cp->uuid, 16);
2610 uuid->svc_hint = cp->svc_hint;
2611 uuid->size = get_uuid_size(cp->uuid);
2612
2613 list_add_tail(&uuid->list, &hdev->uuids);
2614
2615 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2616 if (!cmd) {
2617 err = -ENOMEM;
2618 goto failed;
2619 }
2620
2621 /* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running, so
2622 * use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2623 */
2624 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2625 mgmt_class_complete);
2626 if (err < 0) {
2627 mgmt_pending_free(cmd);
2628 goto failed;
2629 }
2630
2631 failed:
2632 hci_dev_unlock(hdev);
2633 return err;
2634 }
2635
2636 static bool enable_service_cache(struct hci_dev *hdev)
2637 {
2638 if (!hdev_is_powered(hdev))
2639 return false;
2640
2641 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2642 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2643 CACHE_TIMEOUT);
2644 return true;
2645 }
2646
2647 return false;
2648 }
2649
2650 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2651 {
2652 int err;
2653
2654 err = hci_update_class_sync(hdev);
2655 if (err)
2656 return err;
2657
2658 return hci_update_eir_sync(hdev);
2659 }
2660
2661 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2662 u16 len)
2663 {
2664 struct mgmt_cp_remove_uuid *cp = data;
2665 struct mgmt_pending_cmd *cmd;
2666 struct bt_uuid *match, *tmp;
2667 static const u8 bt_uuid_any[] = {
2668 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2669 };
2670 int err, found;
2671
2672 bt_dev_dbg(hdev, "sock %p", sk);
2673
2674 hci_dev_lock(hdev);
2675
2676 if (pending_eir_or_class(hdev)) {
2677 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2678 MGMT_STATUS_BUSY);
2679 goto unlock;
2680 }
2681
2682 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2683 hci_uuids_clear(hdev);
2684
2685 if (enable_service_cache(hdev)) {
2686 err = mgmt_cmd_complete(sk, hdev->id,
2687 MGMT_OP_REMOVE_UUID,
2688 0, hdev->dev_class, 3);
2689 goto unlock;
2690 }
2691
2692 goto update_class;
2693 }
2694
2695 found = 0;
2696
2697 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2698 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2699 continue;
2700
2701 list_del(&match->list);
2702 kfree(match);
2703 found++;
2704 }
2705
2706 if (found == 0) {
2707 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2708 MGMT_STATUS_INVALID_PARAMS);
2709 goto unlock;
2710 }
2711
2712 update_class:
2713 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2714 if (!cmd) {
2715 err = -ENOMEM;
2716 goto unlock;
2717 }
2718
2719 /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running,
2720 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2721 */
2722 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2723 mgmt_class_complete);
2724 if (err < 0)
2725 mgmt_pending_free(cmd);
2726
2727 unlock:
2728 hci_dev_unlock(hdev);
2729 return err;
2730 }
2731
2732 static int set_class_sync(struct hci_dev *hdev, void *data)
2733 {
2734 int err = 0;
2735
2736 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2737 cancel_delayed_work_sync(&hdev->service_cache);
2738 err = hci_update_eir_sync(hdev);
2739 }
2740
2741 if (err)
2742 return err;
2743
2744 return hci_update_class_sync(hdev);
2745 }
2746
2747 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2748 u16 len)
2749 {
2750 struct mgmt_cp_set_dev_class *cp = data;
2751 struct mgmt_pending_cmd *cmd;
2752 int err;
2753
2754 bt_dev_dbg(hdev, "sock %p", sk);
2755
2756 if (!lmp_bredr_capable(hdev))
2757 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2758 MGMT_STATUS_NOT_SUPPORTED);
2759
2760 hci_dev_lock(hdev);
2761
2762 if (pending_eir_or_class(hdev)) {
2763 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2764 MGMT_STATUS_BUSY);
2765 goto unlock;
2766 }
2767
2768 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2769 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2770 MGMT_STATUS_INVALID_PARAMS);
2771 goto unlock;
2772 }
2773
2774 hdev->major_class = cp->major;
2775 hdev->minor_class = cp->minor;
2776
2777 if (!hdev_is_powered(hdev)) {
2778 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2779 hdev->dev_class, 3);
2780 goto unlock;
2781 }
2782
2783 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2784 if (!cmd) {
2785 err = -ENOMEM;
2786 goto unlock;
2787 }
2788
2789 /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running,
2790 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2791 */
2792 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2793 mgmt_class_complete);
2794 if (err < 0)
2795 mgmt_pending_free(cmd);
2796
2797 unlock:
2798 hci_dev_unlock(hdev);
2799 return err;
2800 }
2801
2802 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2803 u16 len)
2804 {
2805 struct mgmt_cp_load_link_keys *cp = data;
2806 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2807 sizeof(struct mgmt_link_key_info));
2808 u16 key_count, expected_len;
2809 bool changed;
2810 int i;
2811
2812 bt_dev_dbg(hdev, "sock %p", sk);
2813
2814 if (!lmp_bredr_capable(hdev))
2815 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2816 MGMT_STATUS_NOT_SUPPORTED);
2817
2818 key_count = __le16_to_cpu(cp->key_count);
2819 if (key_count > max_key_count) {
2820 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2821 key_count);
2822 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2823 MGMT_STATUS_INVALID_PARAMS);
2824 }
2825
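/* struct_size(cp, keys, key_count) evaluates to sizeof(*cp) +
 * key_count * sizeof(cp->keys[0]) with overflow checking, so the
 * command is only accepted when it carries exactly key_count
 * struct mgmt_link_key_info entries after the fixed header.
 */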
2826 expected_len = struct_size(cp, keys, key_count);
2827 if (expected_len != len) {
2828 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2829 expected_len, len);
2830 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2831 MGMT_STATUS_INVALID_PARAMS);
2832 }
2833
2834 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2835 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2836 MGMT_STATUS_INVALID_PARAMS);
2837
2838 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2839 key_count);
2840
2841 hci_dev_lock(hdev);
2842
2843 hci_link_keys_clear(hdev);
2844
2845 if (cp->debug_keys)
2846 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2847 else
2848 changed = hci_dev_test_and_clear_flag(hdev,
2849 HCI_KEEP_DEBUG_KEYS);
2850
2851 if (changed)
2852 new_settings(hdev, NULL);
2853
2854 for (i = 0; i < key_count; i++) {
2855 struct mgmt_link_key_info *key = &cp->keys[i];
2856
2857 if (hci_is_blocked_key(hdev,
2858 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2859 key->val)) {
2860 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2861 &key->addr.bdaddr);
2862 continue;
2863 }
2864
2865 if (key->addr.type != BDADDR_BREDR) {
2866 bt_dev_warn(hdev,
2867 "Invalid link address type %u for %pMR",
2868 key->addr.type, &key->addr.bdaddr);
2869 continue;
2870 }
2871
2872 if (key->type > 0x08) {
2873 bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
2874 key->type, &key->addr.bdaddr);
2875 continue;
2876 }
2877
2878 /* Always ignore debug keys and require a new pairing if
2879 * the user wants to use them.
2880 */
2881 if (key->type == HCI_LK_DEBUG_COMBINATION)
2882 continue;
2883
2884 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2885 key->type, key->pin_len, NULL);
2886 }
2887
2888 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2889
2890 hci_dev_unlock(hdev);
2891
2892 return 0;
2893 }
2894
2895 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2896 u8 addr_type, struct sock *skip_sk)
2897 {
2898 struct mgmt_ev_device_unpaired ev;
2899
2900 bacpy(&ev.addr.bdaddr, bdaddr);
2901 ev.addr.type = addr_type;
2902
2903 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2904 skip_sk);
2905 }
2906
2907 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2908 {
2909 struct mgmt_pending_cmd *cmd = data;
2910 struct mgmt_cp_unpair_device *cp = cmd->param;
2911
2912 if (!err)
2913 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2914
2915 cmd->cmd_complete(cmd, err);
2916 mgmt_pending_free(cmd);
2917 }
2918
2919 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2920 {
2921 struct mgmt_pending_cmd *cmd = data;
2922 struct mgmt_cp_unpair_device *cp = cmd->param;
2923 struct hci_conn *conn;
2924
2925 if (cp->addr.type == BDADDR_BREDR)
2926 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2927 &cp->addr.bdaddr);
2928 else
2929 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2930 le_addr_type(cp->addr.type));
2931
2932 if (!conn)
2933 return 0;
2934
2935 /* Disregard any possible error since the likes of hci_abort_conn_sync
2936 * will clean up the connection no matter the error.
2937 */
2938 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2939
2940 return 0;
2941 }
2942
2943 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2944 u16 len)
2945 {
2946 struct mgmt_cp_unpair_device *cp = data;
2947 struct mgmt_rp_unpair_device rp;
2948 struct hci_conn_params *params;
2949 struct mgmt_pending_cmd *cmd;
2950 struct hci_conn *conn;
2951 u8 addr_type;
2952 int err;
2953
2954 memset(&rp, 0, sizeof(rp));
2955 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2956 rp.addr.type = cp->addr.type;
2957
2958 if (!bdaddr_type_is_valid(cp->addr.type))
2959 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2960 MGMT_STATUS_INVALID_PARAMS,
2961 &rp, sizeof(rp));
2962
2963 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2964 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2965 MGMT_STATUS_INVALID_PARAMS,
2966 &rp, sizeof(rp));
2967
2968 hci_dev_lock(hdev);
2969
2970 if (!hdev_is_powered(hdev)) {
2971 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2972 MGMT_STATUS_NOT_POWERED, &rp,
2973 sizeof(rp));
2974 goto unlock;
2975 }
2976
2977 if (cp->addr.type == BDADDR_BREDR) {
2978 /* If disconnection is requested, then look up the
2979 * connection. If the remote device is connected, it
2980 * will be later used to terminate the link.
2981 *
2982 * Setting it to NULL explicitly will cause no
2983 * termination of the link.
2984 */
2985 if (cp->disconnect)
2986 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2987 &cp->addr.bdaddr);
2988 else
2989 conn = NULL;
2990
2991 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2992 if (err < 0) {
2993 err = mgmt_cmd_complete(sk, hdev->id,
2994 MGMT_OP_UNPAIR_DEVICE,
2995 MGMT_STATUS_NOT_PAIRED, &rp,
2996 sizeof(rp));
2997 goto unlock;
2998 }
2999
3000 goto done;
3001 }
3002
3003 /* LE address type */
3004 addr_type = le_addr_type(cp->addr.type);
3005
3006 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3007 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3008 if (err < 0) {
3009 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3010 MGMT_STATUS_NOT_PAIRED, &rp,
3011 sizeof(rp));
3012 goto unlock;
3013 }
3014
3015 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3016 if (!conn) {
3017 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3018 goto done;
3019 }
3020
3021
3022 /* Defer clearing the connection parameters until the connection
3023 * closes, to give a chance of keeping them if re-pairing happens.
3024 */
3025 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3026
3027 /* Disable auto-connection parameters if present */
3028 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3029 if (params) {
3030 if (params->explicit_connect)
3031 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3032 else
3033 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3034 }
3035
3036 /* If disconnection is not requested, then clear the connection
3037 * variable so that the link is not terminated.
3038 */
3039 if (!cp->disconnect)
3040 conn = NULL;
3041
3042 done:
3043 /* If the connection variable is set, then termination of the
3044 * link is requested.
3045 */
3046 if (!conn) {
3047 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3048 &rp, sizeof(rp));
3049 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3050 goto unlock;
3051 }
3052
3053 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3054 sizeof(*cp));
3055 if (!cmd) {
3056 err = -ENOMEM;
3057 goto unlock;
3058 }
3059
3060 cmd->cmd_complete = addr_cmd_complete;
3061
3062 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3063 unpair_device_complete);
3064 if (err < 0)
3065 mgmt_pending_free(cmd);
3066
3067 unlock:
3068 hci_dev_unlock(hdev);
3069 return err;
3070 }
3071
3072 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3073 {
3074 struct mgmt_pending_cmd *cmd = data;
3075
3076 cmd->cmd_complete(cmd, mgmt_status(err));
3077 mgmt_pending_free(cmd);
3078 }
3079
3080 static int disconnect_sync(struct hci_dev *hdev, void *data)
3081 {
3082 struct mgmt_pending_cmd *cmd = data;
3083 struct mgmt_cp_disconnect *cp = cmd->param;
3084 struct hci_conn *conn;
3085
3086 if (cp->addr.type == BDADDR_BREDR)
3087 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3088 &cp->addr.bdaddr);
3089 else
3090 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3091 le_addr_type(cp->addr.type));
3092
3093 if (!conn)
3094 return -ENOTCONN;
3095
3096 /* Disregard any possible error since the likes of hci_abort_conn_sync
3097 * will clean up the connection no matter the error.
3098 */
3099 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3100
3101 return 0;
3102 }
3103
3104 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3105 u16 len)
3106 {
3107 struct mgmt_cp_disconnect *cp = data;
3108 struct mgmt_rp_disconnect rp;
3109 struct mgmt_pending_cmd *cmd;
3110 int err;
3111
3112 bt_dev_dbg(hdev, "sock %p", sk);
3113
3114 memset(&rp, 0, sizeof(rp));
3115 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3116 rp.addr.type = cp->addr.type;
3117
3118 if (!bdaddr_type_is_valid(cp->addr.type))
3119 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3120 MGMT_STATUS_INVALID_PARAMS,
3121 &rp, sizeof(rp));
3122
3123 hci_dev_lock(hdev);
3124
3125 if (!test_bit(HCI_UP, &hdev->flags)) {
3126 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3127 MGMT_STATUS_NOT_POWERED, &rp,
3128 sizeof(rp));
3129 goto failed;
3130 }
3131
3132 cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3133 if (!cmd) {
3134 err = -ENOMEM;
3135 goto failed;
3136 }
3137
3138 cmd->cmd_complete = generic_cmd_complete;
3139
3140 err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3141 disconnect_complete);
3142 if (err < 0)
3143 mgmt_pending_free(cmd);
3144
3145 failed:
3146 hci_dev_unlock(hdev);
3147 return err;
3148 }
3149
3150 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3151 {
3152 switch (link_type) {
3153 case ISO_LINK:
3154 case LE_LINK:
3155 switch (addr_type) {
3156 case ADDR_LE_DEV_PUBLIC:
3157 return BDADDR_LE_PUBLIC;
3158
3159 default:
3160 /* Fallback to LE Random address type */
3161 return BDADDR_LE_RANDOM;
3162 }
3163
3164 default:
3165 /* Fallback to BR/EDR type */
3166 return BDADDR_BREDR;
3167 }
3168 }
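
/* Mapping summary: (LE_LINK or ISO_LINK, ADDR_LE_DEV_PUBLIC) maps to
 * BDADDR_LE_PUBLIC, any other LE address type to BDADDR_LE_RANDOM, and
 * every non-LE link type to BDADDR_BREDR. This translates hci_conn
 * fields into the address types exposed over the mgmt interface.
 */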
3169
3170 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3171 u16 data_len)
3172 {
3173 struct mgmt_rp_get_connections *rp;
3174 struct hci_conn *c;
3175 int err;
3176 u16 i;
3177
3178 bt_dev_dbg(hdev, "sock %p", sk);
3179
3180 hci_dev_lock(hdev);
3181
3182 if (!hdev_is_powered(hdev)) {
3183 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3184 MGMT_STATUS_NOT_POWERED);
3185 goto unlock;
3186 }
3187
3188 i = 0;
3189 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3190 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3191 i++;
3192 }
3193
3194 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3195 if (!rp) {
3196 err = -ENOMEM;
3197 goto unlock;
3198 }
3199
3200 i = 0;
3201 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3202 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3203 continue;
3204 bacpy(&rp->addr[i].bdaddr, &c->dst);
3205 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3206 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3207 continue;
3208 i++;
3209 }
3210
3211 rp->conn_count = cpu_to_le16(i);
3212
3213 /* Recalculate length in case of filtered SCO connections, etc */
3214 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3215 struct_size(rp, addr, i));
3216
3217 kfree(rp);
3218
3219 unlock:
3220 hci_dev_unlock(hdev);
3221 return err;
3222 }
3223
3224 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3225 struct mgmt_cp_pin_code_neg_reply *cp)
3226 {
3227 struct mgmt_pending_cmd *cmd;
3228 int err;
3229
3230 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3231 sizeof(*cp));
3232 if (!cmd)
3233 return -ENOMEM;
3234
3235 cmd->cmd_complete = addr_cmd_complete;
3236
3237 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3238 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3239 if (err < 0)
3240 mgmt_pending_remove(cmd);
3241
3242 return err;
3243 }
3244
3245 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3246 u16 len)
3247 {
3248 struct hci_conn *conn;
3249 struct mgmt_cp_pin_code_reply *cp = data;
3250 struct hci_cp_pin_code_reply reply;
3251 struct mgmt_pending_cmd *cmd;
3252 int err;
3253
3254 bt_dev_dbg(hdev, "sock %p", sk);
3255
3256 hci_dev_lock(hdev);
3257
3258 if (!hdev_is_powered(hdev)) {
3259 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3260 MGMT_STATUS_NOT_POWERED);
3261 goto failed;
3262 }
3263
3264 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3265 if (!conn) {
3266 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3267 MGMT_STATUS_NOT_CONNECTED);
3268 goto failed;
3269 }
3270
3271 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3272 struct mgmt_cp_pin_code_neg_reply ncp;
3273
3274 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3275
3276 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3277
3278 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3279 if (err >= 0)
3280 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3281 MGMT_STATUS_INVALID_PARAMS);
3282
3283 goto failed;
3284 }
3285
3286 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3287 if (!cmd) {
3288 err = -ENOMEM;
3289 goto failed;
3290 }
3291
3292 cmd->cmd_complete = addr_cmd_complete;
3293
3294 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3295 reply.pin_len = cp->pin_len;
3296 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3297
3298 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3299 if (err < 0)
3300 mgmt_pending_remove(cmd);
3301
3302 failed:
3303 hci_dev_unlock(hdev);
3304 return err;
3305 }
3306
3307 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3308 u16 len)
3309 {
3310 struct mgmt_cp_set_io_capability *cp = data;
3311
3312 bt_dev_dbg(hdev, "sock %p", sk);
3313
3314 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3315 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3316 MGMT_STATUS_INVALID_PARAMS);
3317
3318 hci_dev_lock(hdev);
3319
3320 hdev->io_capability = cp->io_capability;
3321
3322 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3323
3324 hci_dev_unlock(hdev);
3325
3326 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3327 NULL, 0);
3328 }
3329
3330 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3331 {
3332 struct hci_dev *hdev = conn->hdev;
3333 struct mgmt_pending_cmd *cmd;
3334
3335 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3336 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3337 continue;
3338
3339 if (cmd->user_data != conn)
3340 continue;
3341
3342 return cmd;
3343 }
3344
3345 return NULL;
3346 }
3347
3348 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3349 {
3350 struct mgmt_rp_pair_device rp;
3351 struct hci_conn *conn = cmd->user_data;
3352 int err;
3353
3354 bacpy(&rp.addr.bdaddr, &conn->dst);
3355 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3356
3357 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3358 status, &rp, sizeof(rp));
3359
3360 /* So we don't get further callbacks for this connection */
3361 conn->connect_cfm_cb = NULL;
3362 conn->security_cfm_cb = NULL;
3363 conn->disconn_cfm_cb = NULL;
3364
3365 hci_conn_drop(conn);
3366
3367 /* The device is paired so there is no need to remove
3368 * its connection parameters anymore.
3369 */
3370 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3371
3372 hci_conn_put(conn);
3373
3374 return err;
3375 }
3376
3377 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3378 {
3379 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3380 struct mgmt_pending_cmd *cmd;
3381
3382 cmd = find_pairing(conn);
3383 if (cmd) {
3384 cmd->cmd_complete(cmd, status);
3385 mgmt_pending_remove(cmd);
3386 }
3387 }
3388
3389 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3390 {
3391 struct mgmt_pending_cmd *cmd;
3392
3393 BT_DBG("status %u", status);
3394
3395 cmd = find_pairing(conn);
3396 if (!cmd) {
3397 BT_DBG("Unable to find a pending command");
3398 return;
3399 }
3400
3401 cmd->cmd_complete(cmd, mgmt_status(status));
3402 mgmt_pending_remove(cmd);
3403 }
3404
3405 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3406 {
3407 struct mgmt_pending_cmd *cmd;
3408
3409 BT_DBG("status %u", status);
3410
3411 if (!status)
3412 return;
3413
3414 cmd = find_pairing(conn);
3415 if (!cmd) {
3416 BT_DBG("Unable to find a pending command");
3417 return;
3418 }
3419
3420 cmd->cmd_complete(cmd, mgmt_status(status));
3421 mgmt_pending_remove(cmd);
3422 }
3423
3424 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3425 u16 len)
3426 {
3427 struct mgmt_cp_pair_device *cp = data;
3428 struct mgmt_rp_pair_device rp;
3429 struct mgmt_pending_cmd *cmd;
3430 u8 sec_level, auth_type;
3431 struct hci_conn *conn;
3432 int err;
3433
3434 bt_dev_dbg(hdev, "sock %p", sk);
3435
3436 memset(&rp, 0, sizeof(rp));
3437 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3438 rp.addr.type = cp->addr.type;
3439
3440 if (!bdaddr_type_is_valid(cp->addr.type))
3441 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3442 MGMT_STATUS_INVALID_PARAMS,
3443 &rp, sizeof(rp));
3444
3445 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3446 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3447 MGMT_STATUS_INVALID_PARAMS,
3448 &rp, sizeof(rp));
3449
3450 hci_dev_lock(hdev);
3451
3452 if (!hdev_is_powered(hdev)) {
3453 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3454 MGMT_STATUS_NOT_POWERED, &rp,
3455 sizeof(rp));
3456 goto unlock;
3457 }
3458
3459 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3460 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3461 MGMT_STATUS_ALREADY_PAIRED, &rp,
3462 sizeof(rp));
3463 goto unlock;
3464 }
3465
3466 sec_level = BT_SECURITY_MEDIUM;
3467 auth_type = HCI_AT_DEDICATED_BONDING;
3468
3469 if (cp->addr.type == BDADDR_BREDR) {
3470 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3471 auth_type, CONN_REASON_PAIR_DEVICE);
3472 } else {
3473 u8 addr_type = le_addr_type(cp->addr.type);
3474 struct hci_conn_params *p;
3475
3476 /* When pairing a new device, it is expected to remember
3477 * this device for future connections. Adding the connection
3478 * parameter information ahead of time allows tracking
3479 * of the peripheral preferred values and will speed up any
3480 * further connection establishment.
3481 *
3482 * If connection parameters already exist, then they
3483 * will be kept and this function does nothing.
3484 */
3485 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3486 if (!p) {
3487 err = -EIO;
3488 goto unlock;
3489 }
3490
3491 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3492 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3493
3494 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3495 sec_level, HCI_LE_CONN_TIMEOUT,
3496 CONN_REASON_PAIR_DEVICE);
3497 }
3498
3499 if (IS_ERR(conn)) {
3500 int status;
3501
3502 if (PTR_ERR(conn) == -EBUSY)
3503 status = MGMT_STATUS_BUSY;
3504 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3505 status = MGMT_STATUS_NOT_SUPPORTED;
3506 else if (PTR_ERR(conn) == -ECONNREFUSED)
3507 status = MGMT_STATUS_REJECTED;
3508 else
3509 status = MGMT_STATUS_CONNECT_FAILED;
3510
3511 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3512 status, &rp, sizeof(rp));
3513 goto unlock;
3514 }
3515
3516 if (conn->connect_cfm_cb) {
3517 hci_conn_drop(conn);
3518 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3519 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3520 goto unlock;
3521 }
3522
3523 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3524 if (!cmd) {
3525 err = -ENOMEM;
3526 hci_conn_drop(conn);
3527 goto unlock;
3528 }
3529
3530 cmd->cmd_complete = pairing_complete;
3531
3532 /* For LE, just connecting isn't proof that the pairing finished */
3533 if (cp->addr.type == BDADDR_BREDR) {
3534 conn->connect_cfm_cb = pairing_complete_cb;
3535 conn->security_cfm_cb = pairing_complete_cb;
3536 conn->disconn_cfm_cb = pairing_complete_cb;
3537 } else {
3538 conn->connect_cfm_cb = le_pairing_complete_cb;
3539 conn->security_cfm_cb = le_pairing_complete_cb;
3540 conn->disconn_cfm_cb = le_pairing_complete_cb;
3541 }
3542
3543 conn->io_capability = cp->io_cap;
3544 cmd->user_data = hci_conn_get(conn);
3545
3546 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3547 hci_conn_security(conn, sec_level, auth_type, true)) {
3548 cmd->cmd_complete(cmd, 0);
3549 mgmt_pending_remove(cmd);
3550 }
3551
3552 err = 0;
3553
3554 unlock:
3555 hci_dev_unlock(hdev);
3556 return err;
3557 }
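
/* Example (illustrative, not part of mgmt.c): the payload a userspace
 * client would send to reach pair_device(). Layout per mgmt-api.txt,
 * where MGMT_OP_PAIR_DEVICE is opcode 0x0019; verify both against your
 * copy of mgmt.h before relying on them.
 *
 *	struct mgmt_addr_info {
 *		uint8_t bdaddr[6];	// little-endian device address
 *		uint8_t type;		// BDADDR_BREDR, BDADDR_LE_PUBLIC, ...
 *	} __attribute__((packed));
 *
 *	struct mgmt_cp_pair_device {
 *		struct mgmt_addr_info addr;
 *		uint8_t io_cap;		// at most SMP_IO_KEYBOARD_DISPLAY
 *	} __attribute__((packed));
 *
 * The reply echoes the address back together with a status once
 * pairing_complete() runs.
 */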
3558
3559 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3560 u16 len)
3561 {
3562 struct mgmt_addr_info *addr = data;
3563 struct mgmt_pending_cmd *cmd;
3564 struct hci_conn *conn;
3565 int err;
3566
3567 bt_dev_dbg(hdev, "sock %p", sk);
3568
3569 hci_dev_lock(hdev);
3570
3571 if (!hdev_is_powered(hdev)) {
3572 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3573 MGMT_STATUS_NOT_POWERED);
3574 goto unlock;
3575 }
3576
3577 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3578 if (!cmd) {
3579 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3580 MGMT_STATUS_INVALID_PARAMS);
3581 goto unlock;
3582 }
3583
3584 conn = cmd->user_data;
3585
3586 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3587 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3588 MGMT_STATUS_INVALID_PARAMS);
3589 goto unlock;
3590 }
3591
3592 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3593 mgmt_pending_remove(cmd);
3594
3595 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3596 addr, sizeof(*addr));
3597
3598 /* Since the user doesn't want to proceed with the connection, abort
3599 * any ongoing pairing and then terminate the link if it was created
3600 * because of the pair device action.
3601 */
3602 if (addr->type == BDADDR_BREDR)
3603 hci_remove_link_key(hdev, &addr->bdaddr);
3604 else
3605 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3606 le_addr_type(addr->type));
3607
3608 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3609 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3610
3611 unlock:
3612 hci_dev_unlock(hdev);
3613 return err;
3614 }
3615
3616 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3617 struct mgmt_addr_info *addr, u16 mgmt_op,
3618 u16 hci_op, __le32 passkey)
3619 {
3620 struct mgmt_pending_cmd *cmd;
3621 struct hci_conn *conn;
3622 int err;
3623
3624 hci_dev_lock(hdev);
3625
3626 if (!hdev_is_powered(hdev)) {
3627 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3628 MGMT_STATUS_NOT_POWERED, addr,
3629 sizeof(*addr));
3630 goto done;
3631 }
3632
3633 if (addr->type == BDADDR_BREDR)
3634 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3635 else
3636 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3637 le_addr_type(addr->type));
3638
3639 if (!conn) {
3640 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3641 MGMT_STATUS_NOT_CONNECTED, addr,
3642 sizeof(*addr));
3643 goto done;
3644 }
3645
3646 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3647 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3648 if (!err)
3649 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3650 MGMT_STATUS_SUCCESS, addr,
3651 sizeof(*addr));
3652 else
3653 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3654 MGMT_STATUS_FAILED, addr,
3655 sizeof(*addr));
3656
3657 goto done;
3658 }
3659
3660 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3661 if (!cmd) {
3662 err = -ENOMEM;
3663 goto done;
3664 }
3665
3666 cmd->cmd_complete = addr_cmd_complete;
3667
3668 /* Continue with pairing via HCI */
3669 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3670 struct hci_cp_user_passkey_reply cp;
3671
3672 bacpy(&cp.bdaddr, &addr->bdaddr);
3673 cp.passkey = passkey;
3674 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3675 } else
3676 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3677 &addr->bdaddr);
3678
3679 if (err < 0)
3680 mgmt_pending_remove(cmd);
3681
3682 done:
3683 hci_dev_unlock(hdev);
3684 return err;
3685 }
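
/* Dispatch summary for user_pairing_resp(): LE confirmations are routed
 * through SMP via smp_user_confirm_reply() and answered immediately,
 * while BR/EDR replies become raw HCI commands whose mgmt response is
 * deferred until the corresponding HCI event arrives.
 */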
3686
3687 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3688 void *data, u16 len)
3689 {
3690 struct mgmt_cp_pin_code_neg_reply *cp = data;
3691
3692 bt_dev_dbg(hdev, "sock %p", sk);
3693
3694 return user_pairing_resp(sk, hdev, &cp->addr,
3695 MGMT_OP_PIN_CODE_NEG_REPLY,
3696 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3697 }
3698
3699 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3700 u16 len)
3701 {
3702 struct mgmt_cp_user_confirm_reply *cp = data;
3703
3704 bt_dev_dbg(hdev, "sock %p", sk);
3705
3706 if (len != sizeof(*cp))
3707 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3708 MGMT_STATUS_INVALID_PARAMS);
3709
3710 return user_pairing_resp(sk, hdev, &cp->addr,
3711 MGMT_OP_USER_CONFIRM_REPLY,
3712 HCI_OP_USER_CONFIRM_REPLY, 0);
3713 }
3714
3715 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3716 void *data, u16 len)
3717 {
3718 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3719
3720 bt_dev_dbg(hdev, "sock %p", sk);
3721
3722 return user_pairing_resp(sk, hdev, &cp->addr,
3723 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3724 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3725 }
3726
3727 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3728 u16 len)
3729 {
3730 struct mgmt_cp_user_passkey_reply *cp = data;
3731
3732 bt_dev_dbg(hdev, "sock %p", sk);
3733
3734 return user_pairing_resp(sk, hdev, &cp->addr,
3735 MGMT_OP_USER_PASSKEY_REPLY,
3736 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3737 }
3738
3739 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3740 void *data, u16 len)
3741 {
3742 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3743
3744 bt_dev_dbg(hdev, "sock %p", sk);
3745
3746 return user_pairing_resp(sk, hdev, &cp->addr,
3747 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3748 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3749 }
3750
3751 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3752 {
3753 struct adv_info *adv_instance;
3754
3755 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3756 if (!adv_instance)
3757 return 0;
3758
3759 /* stop if current instance doesn't need to be changed */
3760 if (!(adv_instance->flags & flags))
3761 return 0;
3762
3763 cancel_adv_timeout(hdev);
3764
3765 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3766 if (!adv_instance)
3767 return 0;
3768
3769 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3770
3771 return 0;
3772 }
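
/* adv_expire_sync() is shared by the local-name and appearance setters
 * below: if the current advertising instance embeds the changed field
 * (per the flags argument), the instance is expired early so the next
 * scheduled instance picks up the fresh data.
 */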
3773
3774 static int name_changed_sync(struct hci_dev *hdev, void *data)
3775 {
3776 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3777 }
3778
3779 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3780 {
3781 struct mgmt_pending_cmd *cmd = data;
3782 struct mgmt_cp_set_local_name *cp = cmd->param;
3783 u8 status = mgmt_status(err);
3784
3785 bt_dev_dbg(hdev, "err %d", err);
3786
3787 if (err == -ECANCELED ||
3788 cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3789 return;
3790
3791 if (status) {
3792 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3793 status);
3794 } else {
3795 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3796 cp, sizeof(*cp));
3797
3798 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3799 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3800 }
3801
3802 mgmt_pending_remove(cmd);
3803 }
3804
3805 static int set_name_sync(struct hci_dev *hdev, void *data)
3806 {
3807 if (lmp_bredr_capable(hdev)) {
3808 hci_update_name_sync(hdev);
3809 hci_update_eir_sync(hdev);
3810 }
3811
3812 /* The name is stored in the scan response data, so there is no
3813  * need to update the advertising data here.
3814  */
3815 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3816 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3817
3818 return 0;
3819 }
3820
3821 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3822 u16 len)
3823 {
3824 struct mgmt_cp_set_local_name *cp = data;
3825 struct mgmt_pending_cmd *cmd;
3826 int err;
3827
3828 bt_dev_dbg(hdev, "sock %p", sk);
3829
3830 hci_dev_lock(hdev);
3831
3832 /* If the old values are the same as the new ones, just return a
3833  * direct command complete event.
3834  */
3835 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3836 !memcmp(hdev->short_name, cp->short_name,
3837 sizeof(hdev->short_name))) {
3838 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3839 data, len);
3840 goto failed;
3841 }
3842
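/* There is no dedicated HCI command for the short name; it is only
 * used when generating EIR and advertising data, so it can be copied
 * right away.
 */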
3843 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3844
3845 if (!hdev_is_powered(hdev)) {
3846 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3847
3848 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3849 data, len);
3850 if (err < 0)
3851 goto failed;
3852
3853 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3854 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3855 ext_info_changed(hdev, sk);
3856
3857 goto failed;
3858 }
3859
3860 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3861 if (!cmd)
3862 err = -ENOMEM;
3863 else
3864 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3865 set_name_complete);
3866
3867 if (err < 0) {
3868 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3869 MGMT_STATUS_FAILED);
3870
3871 if (cmd)
3872 mgmt_pending_remove(cmd);
3873
3874 goto failed;
3875 }
3876
3877 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3878
3879 failed:
3880 hci_dev_unlock(hdev);
3881 return err;
3882 }
3883
3884 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3885 {
3886 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3887 }
3888
3889 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3890 u16 len)
3891 {
3892 struct mgmt_cp_set_appearance *cp = data;
3893 u16 appearance;
3894 int err;
3895
3896 bt_dev_dbg(hdev, "sock %p", sk);
3897
3898 if (!lmp_le_capable(hdev))
3899 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3900 MGMT_STATUS_NOT_SUPPORTED);
3901
3902 appearance = le16_to_cpu(cp->appearance);
3903
3904 hci_dev_lock(hdev);
3905
3906 if (hdev->appearance != appearance) {
3907 hdev->appearance = appearance;
3908
3909 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3910 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3911 NULL);
3912
3913 ext_info_changed(hdev, sk);
3914 }
3915
3916 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3917 0);
3918
3919 hci_dev_unlock(hdev);
3920
3921 return err;
3922 }
3923
3924 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3925 void *data, u16 len)
3926 {
3927 struct mgmt_rp_get_phy_configuration rp;
3928
3929 bt_dev_dbg(hdev, "sock %p", sk);
3930
3931 hci_dev_lock(hdev);
3932
3933 memset(&rp, 0, sizeof(rp));
3934
3935 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3936 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3937 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3938
3939 hci_dev_unlock(hdev);
3940
3941 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3942 &rp, sizeof(rp));
3943 }
3944
3945 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3946 {
3947 struct mgmt_ev_phy_configuration_changed ev;
3948
3949 memset(&ev, 0, sizeof(ev));
3950
3951 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3952
3953 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3954 sizeof(ev), skip);
3955 }
3956
3957 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3958 {
3959 struct mgmt_pending_cmd *cmd = data;
3960 struct sk_buff *skb = cmd->skb;
3961 u8 status = mgmt_status(err);
3962
3963 if (err == -ECANCELED ||
3964 cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3965 return;
3966
3967 if (!status) {
3968 if (!skb)
3969 status = MGMT_STATUS_FAILED;
3970 else if (IS_ERR(skb))
3971 status = mgmt_status(PTR_ERR(skb));
3972 else
3973 status = mgmt_status(skb->data[0]);
3974 }
3975
3976 bt_dev_dbg(hdev, "status %d", status);
3977
3978 if (status) {
3979 mgmt_cmd_status(cmd->sk, hdev->id,
3980 MGMT_OP_SET_PHY_CONFIGURATION, status);
3981 } else {
3982 mgmt_cmd_complete(cmd->sk, hdev->id,
3983 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3984 NULL, 0);
3985
3986 mgmt_phy_configuration_changed(hdev, cmd->sk);
3987 }
3988
3989 if (skb && !IS_ERR(skb))
3990 kfree_skb(skb);
3991
3992 mgmt_pending_remove(cmd);
3993 }
3994
3995 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3996 {
3997 struct mgmt_pending_cmd *cmd = data;
3998 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3999 struct hci_cp_le_set_default_phy cp_phy;
4000 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4001
4002 memset(&cp_phy, 0, sizeof(cp_phy));
4003
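/* In the HCI LE Set Default PHY command, all_phys bit 0 means the
 * host has no TX PHY preference and bit 1 means the same for RX, so
 * the tx_phys/rx_phys masks below are only honoured when the
 * corresponding all_phys bit is left unset.
 */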
4004 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4005 cp_phy.all_phys |= 0x01;
4006
4007 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4008 cp_phy.all_phys |= 0x02;
4009
4010 if (selected_phys & MGMT_PHY_LE_1M_TX)
4011 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4012
4013 if (selected_phys & MGMT_PHY_LE_2M_TX)
4014 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4015
4016 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4017 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4018
4019 if (selected_phys & MGMT_PHY_LE_1M_RX)
4020 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4021
4022 if (selected_phys & MGMT_PHY_LE_2M_RX)
4023 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4024
4025 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4026 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4027
4028 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4029 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4030
4031 return 0;
4032 }
4033
4034 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4035 void *data, u16 len)
4036 {
4037 struct mgmt_cp_set_phy_configuration *cp = data;
4038 struct mgmt_pending_cmd *cmd;
4039 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4040 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4041 bool changed = false;
4042 int err;
4043
4044 bt_dev_dbg(hdev, "sock %p", sk);
4045
4046 configurable_phys = get_configurable_phys(hdev);
4047 supported_phys = get_supported_phys(hdev);
4048 selected_phys = __le32_to_cpu(cp->selected_phys);
4049
4050 if (selected_phys & ~supported_phys)
4051 return mgmt_cmd_status(sk, hdev->id,
4052 MGMT_OP_SET_PHY_CONFIGURATION,
4053 MGMT_STATUS_INVALID_PARAMS);
4054
4055 unconfigure_phys = supported_phys & ~configurable_phys;
4056
4057 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4058 return mgmt_cmd_status(sk, hdev->id,
4059 MGMT_OP_SET_PHY_CONFIGURATION,
4060 MGMT_STATUS_INVALID_PARAMS);
4061
4062 if (selected_phys == get_selected_phys(hdev))
4063 return mgmt_cmd_complete(sk, hdev->id,
4064 MGMT_OP_SET_PHY_CONFIGURATION,
4065 0, NULL, 0);
4066
4067 hci_dev_lock(hdev);
4068
4069 if (!hdev_is_powered(hdev)) {
4070 err = mgmt_cmd_status(sk, hdev->id,
4071 MGMT_OP_SET_PHY_CONFIGURATION,
4072 MGMT_STATUS_REJECTED);
4073 goto unlock;
4074 }
4075
4076 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4077 err = mgmt_cmd_status(sk, hdev->id,
4078 MGMT_OP_SET_PHY_CONFIGURATION,
4079 MGMT_STATUS_BUSY);
4080 goto unlock;
4081 }
4082
4083 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4084 pkt_type |= (HCI_DH3 | HCI_DM3);
4085 else
4086 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4087
4088 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4089 pkt_type |= (HCI_DH5 | HCI_DM5);
4090 else
4091 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4092
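/* The EDR bits in the BR/EDR packet type bitmap are "shall not be
 * used" bits, so they are cleared when an EDR PHY is selected and
 * set when it is deselected.
 */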
4093 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4094 pkt_type &= ~HCI_2DH1;
4095 else
4096 pkt_type |= HCI_2DH1;
4097
4098 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4099 pkt_type &= ~HCI_2DH3;
4100 else
4101 pkt_type |= HCI_2DH3;
4102
4103 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4104 pkt_type &= ~HCI_2DH5;
4105 else
4106 pkt_type |= HCI_2DH5;
4107
4108 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4109 pkt_type &= ~HCI_3DH1;
4110 else
4111 pkt_type |= HCI_3DH1;
4112
4113 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4114 pkt_type &= ~HCI_3DH3;
4115 else
4116 pkt_type |= HCI_3DH3;
4117
4118 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4119 pkt_type &= ~HCI_3DH5;
4120 else
4121 pkt_type |= HCI_3DH5;
4122
4123 if (pkt_type != hdev->pkt_type) {
4124 hdev->pkt_type = pkt_type;
4125 changed = true;
4126 }
4127
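/* If the LE PHY selection did not change, no HCI command is needed;
 * only the BR/EDR packet types were updated above.
 */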
4128 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4129 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4130 if (changed)
4131 mgmt_phy_configuration_changed(hdev, sk);
4132
4133 err = mgmt_cmd_complete(sk, hdev->id,
4134 MGMT_OP_SET_PHY_CONFIGURATION,
4135 0, NULL, 0);
4136
4137 goto unlock;
4138 }
4139
4140 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4141 len);
4142 if (!cmd)
4143 err = -ENOMEM;
4144 else
4145 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4146 set_default_phy_complete);
4147
4148 if (err < 0) {
4149 err = mgmt_cmd_status(sk, hdev->id,
4150 MGMT_OP_SET_PHY_CONFIGURATION,
4151 MGMT_STATUS_FAILED);
4152
4153 if (cmd)
4154 mgmt_pending_remove(cmd);
4155 }
4156
4157 unlock:
4158 hci_dev_unlock(hdev);
4159
4160 return err;
4161 }
4162
4163 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4164 u16 len)
4165 {
4166 int err = MGMT_STATUS_SUCCESS;
4167 struct mgmt_cp_set_blocked_keys *keys = data;
4168 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4169 sizeof(struct mgmt_blocked_key_info));
4170 u16 key_count, expected_len;
4171 int i;
4172
4173 bt_dev_dbg(hdev, "sock %p", sk);
4174
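/* Bound the key count so that the expected length computation below
 * cannot overflow the u16 message length.
 */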
4175 key_count = __le16_to_cpu(keys->key_count);
4176 if (key_count > max_key_count) {
4177 bt_dev_err(hdev, "too big key_count value %u", key_count);
4178 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4179 MGMT_STATUS_INVALID_PARAMS);
4180 }
4181
4182 expected_len = struct_size(keys, keys, key_count);
4183 if (expected_len != len) {
4184 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4185 expected_len, len);
4186 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4187 MGMT_STATUS_INVALID_PARAMS);
4188 }
4189
4190 hci_dev_lock(hdev);
4191
4192 hci_blocked_keys_clear(hdev);
4193
4194 for (i = 0; i < key_count; ++i) {
4195 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4196
4197 if (!b) {
4198 err = MGMT_STATUS_NO_RESOURCES;
4199 break;
4200 }
4201
4202 b->type = keys->keys[i].type;
4203 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4204 list_add_rcu(&b->list, &hdev->blocked_keys);
4205 }
4206 hci_dev_unlock(hdev);
4207
4208 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4209 err, NULL, 0);
4210 }
4211
4212 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4213 void *data, u16 len)
4214 {
4215 struct mgmt_mode *cp = data;
4216 int err;
4217 bool changed = false;
4218
4219 bt_dev_dbg(hdev, "sock %p", sk);
4220
4221 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4222 return mgmt_cmd_status(sk, hdev->id,
4223 MGMT_OP_SET_WIDEBAND_SPEECH,
4224 MGMT_STATUS_NOT_SUPPORTED);
4225
4226 if (cp->val != 0x00 && cp->val != 0x01)
4227 return mgmt_cmd_status(sk, hdev->id,
4228 MGMT_OP_SET_WIDEBAND_SPEECH,
4229 MGMT_STATUS_INVALID_PARAMS);
4230
4231 hci_dev_lock(hdev);
4232
4233 if (hdev_is_powered(hdev) &&
4234 !!cp->val != hci_dev_test_flag(hdev,
4235 HCI_WIDEBAND_SPEECH_ENABLED)) {
4236 err = mgmt_cmd_status(sk, hdev->id,
4237 MGMT_OP_SET_WIDEBAND_SPEECH,
4238 MGMT_STATUS_REJECTED);
4239 goto unlock;
4240 }
4241
4242 if (cp->val)
4243 changed = !hci_dev_test_and_set_flag(hdev,
4244 HCI_WIDEBAND_SPEECH_ENABLED);
4245 else
4246 changed = hci_dev_test_and_clear_flag(hdev,
4247 HCI_WIDEBAND_SPEECH_ENABLED);
4248
4249 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4250 if (err < 0)
4251 goto unlock;
4252
4253 if (changed)
4254 err = new_settings(hdev, sk);
4255
4256 unlock:
4257 hci_dev_unlock(hdev);
4258 return err;
4259 }
4260
4261 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4262 void *data, u16 data_len)
4263 {
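/* 20 bytes is enough for the cap_len header plus all capability TLVs
 * currently appended below (security flags, key sizes and the LE TX
 * power range).
 */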
4264 char buf[20];
4265 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4266 u16 cap_len = 0;
4267 u8 flags = 0;
4268 u8 tx_power_range[2];
4269
4270 bt_dev_dbg(hdev, "sock %p", sk);
4271
4272 memset(&buf, 0, sizeof(buf));
4273
4274 hci_dev_lock(hdev);
4275
4276 /* When the Read Simple Pairing Options command is supported, then
4277  * remote public key validation is also supported.
4278  *
4279  * Alternatively, when Microsoft extensions are available, they can
4280  * indicate support for public key validation as well.
4281  */
4282 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4283 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4284
4285 flags |= 0x02; /* Remote public key validation (LE) */
4286
4287 /* When the Read Encryption Key Size command is supported, then the
4288 * encryption key size is enforced.
4289 */
4290 if (hdev->commands[20] & 0x10)
4291 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4292
4293 flags |= 0x08; /* Encryption key size enforcement (LE) */
4294
4295 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4296 &flags, 1);
4297
4298 /* When the Read Simple Pairing Options command is supported, the
4299  * maximum encryption key size information is also provided.
4300  */
4301 if (hdev->commands[41] & 0x08)
4302 cap_len = eir_append_le16(rp->cap, cap_len,
4303 MGMT_CAP_MAX_ENC_KEY_SIZE,
4304 hdev->max_enc_key_size);
4305
4306 cap_len = eir_append_le16(rp->cap, cap_len,
4307 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4308 SMP_MAX_ENC_KEY_SIZE);
4309
4310 /* Append the min/max LE tx power parameters if we were able to
4311  * fetch them from the controller.
4312  */
4313 if (hdev->commands[38] & 0x80) {
4314 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4315 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4316 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4317 tx_power_range, 2);
4318 }
4319
4320 rp->cap_len = cpu_to_le16(cap_len);
4321
4322 hci_dev_unlock(hdev);
4323
4324 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4325 rp, sizeof(*rp) + cap_len);
4326 }
4327
4328 #ifdef CONFIG_BT_FEATURE_DEBUG
4329 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4330 static const u8 debug_uuid[16] = {
4331 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4332 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4333 };
4334 #endif
4335
4336 /* 330859bc-7506-492d-9370-9a6f0614037f */
4337 static const u8 quality_report_uuid[16] = {
4338 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4339 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4340 };
4341
4342 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4343 static const u8 offload_codecs_uuid[16] = {
4344 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4345 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4346 };
4347
4348 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4349 static const u8 le_simultaneous_roles_uuid[16] = {
4350 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4351 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4352 };
4353
4354 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4355 static const u8 rpa_resolution_uuid[16] = {
4356 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4357 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4358 };
4359
4360 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4361 static const u8 iso_socket_uuid[16] = {
4362 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4363 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4364 };
4365
4366 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4367 static const u8 mgmt_mesh_uuid[16] = {
4368 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4369 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4370 };
4371
4372 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4373 void *data, u16 data_len)
4374 {
4375 struct mgmt_rp_read_exp_features_info *rp;
4376 size_t len;
4377 u16 idx = 0;
4378 u32 flags;
4379 int status;
4380
4381 bt_dev_dbg(hdev, "sock %p", sk);
4382
4383 /* Enough space for 7 features */
4384 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4385 rp = kzalloc(len, GFP_KERNEL);
4386 if (!rp)
4387 return -ENOMEM;
4388
4389 #ifdef CONFIG_BT_FEATURE_DEBUG
4390 if (!hdev) {
4391 flags = bt_dbg_get() ? BIT(0) : 0;
4392
4393 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4394 rp->features[idx].flags = cpu_to_le32(flags);
4395 idx++;
4396 }
4397 #endif
4398
4399 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4400 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4401 flags = BIT(0);
4402 else
4403 flags = 0;
4404
4405 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4406 rp->features[idx].flags = cpu_to_le32(flags);
4407 idx++;
4408 }
4409
4410 if (hdev && ll_privacy_capable(hdev)) {
4411 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4412 flags = BIT(0) | BIT(1);
4413 else
4414 flags = BIT(1);
4415
4416 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4417 rp->features[idx].flags = cpu_to_le32(flags);
4418 idx++;
4419 }
4420
4421 if (hdev && (aosp_has_quality_report(hdev) ||
4422 hdev->set_quality_report)) {
4423 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4424 flags = BIT(0);
4425 else
4426 flags = 0;
4427
4428 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4429 rp->features[idx].flags = cpu_to_le32(flags);
4430 idx++;
4431 }
4432
4433 if (hdev && hdev->get_data_path_id) {
4434 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4435 flags = BIT(0);
4436 else
4437 flags = 0;
4438
4439 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4440 rp->features[idx].flags = cpu_to_le32(flags);
4441 idx++;
4442 }
4443
4444 if (IS_ENABLED(CONFIG_BT_LE)) {
4445 flags = iso_enabled() ? BIT(0) : 0;
4446 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4447 rp->features[idx].flags = cpu_to_le32(flags);
4448 idx++;
4449 }
4450
4451 if (hdev && lmp_le_capable(hdev)) {
4452 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4453 flags = BIT(0);
4454 else
4455 flags = 0;
4456
4457 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4458 rp->features[idx].flags = cpu_to_le32(flags);
4459 idx++;
4460 }
4461
4462 rp->feature_count = cpu_to_le16(idx);
4463
4464 /* After reading the experimental features information, enable
4465  * the events to update the client on any future change.
4466  */
4467 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4468
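/* Each returned feature entry is 20 bytes: a 16 byte UUID followed
 * by 4 bytes of flags.
 */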
4469 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4470 MGMT_OP_READ_EXP_FEATURES_INFO,
4471 0, rp, sizeof(*rp) + (20 * idx));
4472
4473 kfree(rp);
4474 return status;
4475 }
4476
4477 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4478 struct sock *skip)
4479 {
4480 struct mgmt_ev_exp_feature_changed ev;
4481
4482 memset(&ev, 0, sizeof(ev));
4483 memcpy(ev.uuid, rpa_resolution_uuid, 16);
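/* BIT(1) signals that the supported settings changed as well. */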
4484 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4485
4486 /* Do we need to be atomic with the conn_flags? */
4487 if (enabled && privacy_mode_capable(hdev))
4488 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4489 else
4490 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4491
4492 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4493 &ev, sizeof(ev),
4494 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4496 }
4497
4498 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4499 bool enabled, struct sock *skip)
4500 {
4501 struct mgmt_ev_exp_feature_changed ev;
4502
4503 memset(&ev, 0, sizeof(ev));
4504 memcpy(ev.uuid, uuid, 16);
4505 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4506
4507 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4508 &ev, sizeof(ev),
4509 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4510 }
4511
4512 #define EXP_FEAT(_uuid, _set_func) \
4513 { \
4514 .uuid = _uuid, \
4515 .set_func = _set_func, \
4516 }
4517
4518 /* The zero key uuid is special. Multiple exp features are set through it. */
4519 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4520 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4521 {
4522 struct mgmt_rp_set_exp_feature rp;
4523
4524 memset(rp.uuid, 0, 16);
4525 rp.flags = cpu_to_le32(0);
4526
4527 #ifdef CONFIG_BT_FEATURE_DEBUG
4528 if (!hdev) {
4529 bool changed = bt_dbg_get();
4530
4531 bt_dbg_set(false);
4532
4533 if (changed)
4534 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4535 }
4536 #endif
4537
4538 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4539 bool changed;
4540
4541 changed = hci_dev_test_and_clear_flag(hdev,
4542 HCI_ENABLE_LL_PRIVACY);
4543 if (changed)
4544 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4545 sk);
4546 }
4547
4548 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4549
4550 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4551 MGMT_OP_SET_EXP_FEATURE, 0,
4552 &rp, sizeof(rp));
4553 }
4554
4555 #ifdef CONFIG_BT_FEATURE_DEBUG
4556 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4557 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4558 {
4559 struct mgmt_rp_set_exp_feature rp;
4560
4561 bool val, changed;
4562 int err;
4563
4564 /* Command requires the non-controller index */
4565 if (hdev)
4566 return mgmt_cmd_status(sk, hdev->id,
4567 MGMT_OP_SET_EXP_FEATURE,
4568 MGMT_STATUS_INVALID_INDEX);
4569
4570 /* Parameters are limited to a single octet */
4571 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4572 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4573 MGMT_OP_SET_EXP_FEATURE,
4574 MGMT_STATUS_INVALID_PARAMS);
4575
4576 /* Only boolean on/off is supported */
4577 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4578 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4579 MGMT_OP_SET_EXP_FEATURE,
4580 MGMT_STATUS_INVALID_PARAMS);
4581
4582 val = !!cp->param[0];
4583 changed = val ? !bt_dbg_get() : bt_dbg_get();
4584 bt_dbg_set(val);
4585
4586 memcpy(rp.uuid, debug_uuid, 16);
4587 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4588
4589 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4590
4591 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4592 MGMT_OP_SET_EXP_FEATURE, 0,
4593 &rp, sizeof(rp));
4594
4595 if (changed)
4596 exp_feature_changed(hdev, debug_uuid, val, sk);
4597
4598 return err;
4599 }
4600 #endif
4601
4602 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4603 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4604 {
4605 struct mgmt_rp_set_exp_feature rp;
4606 bool val, changed;
4607 int err;
4608
4609 /* Command requires the controller index */
4610 if (!hdev)
4611 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4612 MGMT_OP_SET_EXP_FEATURE,
4613 MGMT_STATUS_INVALID_INDEX);
4614
4615 /* Parameters are limited to a single octet */
4616 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4617 return mgmt_cmd_status(sk, hdev->id,
4618 MGMT_OP_SET_EXP_FEATURE,
4619 MGMT_STATUS_INVALID_PARAMS);
4620
4621 /* Only boolean on/off is supported */
4622 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4623 return mgmt_cmd_status(sk, hdev->id,
4624 MGMT_OP_SET_EXP_FEATURE,
4625 MGMT_STATUS_INVALID_PARAMS);
4626
4627 val = !!cp->param[0];
4628
4629 if (val) {
4630 changed = !hci_dev_test_and_set_flag(hdev,
4631 HCI_MESH_EXPERIMENTAL);
4632 } else {
4633 hci_dev_clear_flag(hdev, HCI_MESH);
4634 changed = hci_dev_test_and_clear_flag(hdev,
4635 HCI_MESH_EXPERIMENTAL);
4636 }
4637
4638 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4639 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4640
4641 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4642
4643 err = mgmt_cmd_complete(sk, hdev->id,
4644 MGMT_OP_SET_EXP_FEATURE, 0,
4645 &rp, sizeof(rp));
4646
4647 if (changed)
4648 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4649
4650 return err;
4651 }
4652
4653 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4654 struct mgmt_cp_set_exp_feature *cp,
4655 u16 data_len)
4656 {
4657 struct mgmt_rp_set_exp_feature rp;
4658 bool val, changed;
4659 int err;
4660 u32 flags;
4661
4662 /* Command requires the controller index */
4663 if (!hdev)
4664 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4665 MGMT_OP_SET_EXP_FEATURE,
4666 MGMT_STATUS_INVALID_INDEX);
4667
4668 /* Changes can only be made when the controller is powered down */
4669 if (hdev_is_powered(hdev))
4670 return mgmt_cmd_status(sk, hdev->id,
4671 MGMT_OP_SET_EXP_FEATURE,
4672 MGMT_STATUS_REJECTED);
4673
4674 /* Parameters are limited to a single octet */
4675 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4676 return mgmt_cmd_status(sk, hdev->id,
4677 MGMT_OP_SET_EXP_FEATURE,
4678 MGMT_STATUS_INVALID_PARAMS);
4679
4680 /* Only boolean on/off is supported */
4681 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4682 return mgmt_cmd_status(sk, hdev->id,
4683 MGMT_OP_SET_EXP_FEATURE,
4684 MGMT_STATUS_INVALID_PARAMS);
4685
4686 val = !!cp->param[0];
4687
4688 if (val) {
4689 changed = !hci_dev_test_and_set_flag(hdev,
4690 HCI_ENABLE_LL_PRIVACY);
4691 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4692
4693 /* Enable LL privacy + supported settings changed */
4694 flags = BIT(0) | BIT(1);
4695 } else {
4696 changed = hci_dev_test_and_clear_flag(hdev,
4697 HCI_ENABLE_LL_PRIVACY);
4698
4699 /* Disable LL privacy + supported settings changed */
4700 flags = BIT(1);
4701 }
4702
4703 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4704 rp.flags = cpu_to_le32(flags);
4705
4706 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4707
4708 err = mgmt_cmd_complete(sk, hdev->id,
4709 MGMT_OP_SET_EXP_FEATURE, 0,
4710 &rp, sizeof(rp));
4711
4712 if (changed)
4713 exp_ll_privacy_feature_changed(val, hdev, sk);
4714
4715 return err;
4716 }
4717
4718 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4719 struct mgmt_cp_set_exp_feature *cp,
4720 u16 data_len)
4721 {
4722 struct mgmt_rp_set_exp_feature rp;
4723 bool val, changed;
4724 int err;
4725
4726 /* Command requires a valid controller index */
4727 if (!hdev)
4728 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4729 MGMT_OP_SET_EXP_FEATURE,
4730 MGMT_STATUS_INVALID_INDEX);
4731
4732 /* Parameters are limited to a single octet */
4733 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4734 return mgmt_cmd_status(sk, hdev->id,
4735 MGMT_OP_SET_EXP_FEATURE,
4736 MGMT_STATUS_INVALID_PARAMS);
4737
4738 /* Only boolean on/off is supported */
4739 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4740 return mgmt_cmd_status(sk, hdev->id,
4741 MGMT_OP_SET_EXP_FEATURE,
4742 MGMT_STATUS_INVALID_PARAMS);
4743
4744 hci_req_sync_lock(hdev);
4745
4746 val = !!cp->param[0];
4747 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4748
4749 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4750 err = mgmt_cmd_status(sk, hdev->id,
4751 MGMT_OP_SET_EXP_FEATURE,
4752 MGMT_STATUS_NOT_SUPPORTED);
4753 goto unlock_quality_report;
4754 }
4755
4756 if (changed) {
4757 if (hdev->set_quality_report)
4758 err = hdev->set_quality_report(hdev, val);
4759 else
4760 err = aosp_set_quality_report(hdev, val);
4761
4762 if (err) {
4763 err = mgmt_cmd_status(sk, hdev->id,
4764 MGMT_OP_SET_EXP_FEATURE,
4765 MGMT_STATUS_FAILED);
4766 goto unlock_quality_report;
4767 }
4768
4769 if (val)
4770 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4771 else
4772 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4773 }
4774
4775 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4776
4777 memcpy(rp.uuid, quality_report_uuid, 16);
4778 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4779 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4780
4781 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4782 &rp, sizeof(rp));
4783
4784 if (changed)
4785 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4786
4787 unlock_quality_report:
4788 hci_req_sync_unlock(hdev);
4789 return err;
4790 }
4791
4792 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4793 struct mgmt_cp_set_exp_feature *cp,
4794 u16 data_len)
4795 {
4796 bool val, changed;
4797 int err;
4798 struct mgmt_rp_set_exp_feature rp;
4799
4800 /* Command requires a valid controller index */
4801 if (!hdev)
4802 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4803 MGMT_OP_SET_EXP_FEATURE,
4804 MGMT_STATUS_INVALID_INDEX);
4805
4806 /* Parameters are limited to a single octet */
4807 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4808 return mgmt_cmd_status(sk, hdev->id,
4809 MGMT_OP_SET_EXP_FEATURE,
4810 MGMT_STATUS_INVALID_PARAMS);
4811
4812 /* Only boolean on/off is supported */
4813 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4814 return mgmt_cmd_status(sk, hdev->id,
4815 MGMT_OP_SET_EXP_FEATURE,
4816 MGMT_STATUS_INVALID_PARAMS);
4817
4818 val = !!cp->param[0];
4819 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4820
4821 if (!hdev->get_data_path_id) {
4822 return mgmt_cmd_status(sk, hdev->id,
4823 MGMT_OP_SET_EXP_FEATURE,
4824 MGMT_STATUS_NOT_SUPPORTED);
4825 }
4826
4827 if (changed) {
4828 if (val)
4829 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4830 else
4831 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4832 }
4833
4834 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4835 val, changed);
4836
4837 memcpy(rp.uuid, offload_codecs_uuid, 16);
4838 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4839 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4840 err = mgmt_cmd_complete(sk, hdev->id,
4841 MGMT_OP_SET_EXP_FEATURE, 0,
4842 &rp, sizeof(rp));
4843
4844 if (changed)
4845 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4846
4847 return err;
4848 }
4849
4850 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4851 struct mgmt_cp_set_exp_feature *cp,
4852 u16 data_len)
4853 {
4854 bool val, changed;
4855 int err;
4856 struct mgmt_rp_set_exp_feature rp;
4857
4858 /* Command requires a valid controller index */
4859 if (!hdev)
4860 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4861 MGMT_OP_SET_EXP_FEATURE,
4862 MGMT_STATUS_INVALID_INDEX);
4863
4864 /* Parameters are limited to a single octet */
4865 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4866 return mgmt_cmd_status(sk, hdev->id,
4867 MGMT_OP_SET_EXP_FEATURE,
4868 MGMT_STATUS_INVALID_PARAMS);
4869
4870 /* Only boolean on/off is supported */
4871 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4872 return mgmt_cmd_status(sk, hdev->id,
4873 MGMT_OP_SET_EXP_FEATURE,
4874 MGMT_STATUS_INVALID_PARAMS);
4875
4876 val = !!cp->param[0];
4877 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4878
4879 if (!hci_dev_le_state_simultaneous(hdev)) {
4880 return mgmt_cmd_status(sk, hdev->id,
4881 MGMT_OP_SET_EXP_FEATURE,
4882 MGMT_STATUS_NOT_SUPPORTED);
4883 }
4884
4885 if (changed) {
4886 if (val)
4887 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4888 else
4889 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4890 }
4891
4892 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4893 val, changed);
4894
4895 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4896 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4897 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4898 err = mgmt_cmd_complete(sk, hdev->id,
4899 MGMT_OP_SET_EXP_FEATURE, 0,
4900 &rp, sizeof(rp));
4901
4902 if (changed)
4903 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4904
4905 return err;
4906 }
4907
4908 #ifdef CONFIG_BT_LE
4909 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4910 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4911 {
4912 struct mgmt_rp_set_exp_feature rp;
4913 bool val, changed = false;
4914 int err;
4915
4916 /* Command requires the non-controller index */
4917 if (hdev)
4918 return mgmt_cmd_status(sk, hdev->id,
4919 MGMT_OP_SET_EXP_FEATURE,
4920 MGMT_STATUS_INVALID_INDEX);
4921
4922 /* Parameters are limited to a single octet */
4923 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4924 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4925 MGMT_OP_SET_EXP_FEATURE,
4926 MGMT_STATUS_INVALID_PARAMS);
4927
4928 /* Only boolean on/off is supported */
4929 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4930 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4931 MGMT_OP_SET_EXP_FEATURE,
4932 MGMT_STATUS_INVALID_PARAMS);
4933
4934 val = !!cp->param[0];
4935 if (val)
4936 err = iso_init();
4937 else
4938 err = iso_exit();
4939
4940 if (!err)
4941 changed = true;
4942
4943 memcpy(rp.uuid, iso_socket_uuid, 16);
4944 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4945
4946 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4947
4948 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4949 MGMT_OP_SET_EXP_FEATURE, 0,
4950 &rp, sizeof(rp));
4951
4952 if (changed)
4953 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4954
4955 return err;
4956 }
4957 #endif
4958
4959 static const struct mgmt_exp_feature {
4960 const u8 *uuid;
4961 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4962 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4963 } exp_features[] = {
4964 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4965 #ifdef CONFIG_BT_FEATURE_DEBUG
4966 EXP_FEAT(debug_uuid, set_debug_func),
4967 #endif
4968 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4969 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4970 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4971 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4972 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4973 #ifdef CONFIG_BT_LE
4974 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4975 #endif
4976
4977 /* end with a null feature */
4978 EXP_FEAT(NULL, NULL)
4979 };
4980
4981 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4982 void *data, u16 data_len)
4983 {
4984 struct mgmt_cp_set_exp_feature *cp = data;
4985 size_t i = 0;
4986
4987 bt_dev_dbg(hdev, "sock %p", sk);
4988
4989 for (i = 0; exp_features[i].uuid; i++) {
4990 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4991 return exp_features[i].set_func(sk, hdev, cp, data_len);
4992 }
4993
4994 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4995 MGMT_OP_SET_EXP_FEATURE,
4996 MGMT_STATUS_NOT_SUPPORTED);
4997 }
4998
4999 static u32 get_params_flags(struct hci_dev *hdev,
5000 struct hci_conn_params *params)
5001 {
5002 u32 flags = hdev->conn_flags;
5003
5004 /* Devices using RPAs can only be programmed in the acceptlist if
5005  * LL Privacy has been enabled, otherwise they cannot mark
5006  * HCI_CONN_FLAG_REMOTE_WAKEUP.
5007  */
5008 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5009 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5010 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5011
5012 return flags;
5013 }
5014
5015 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5016 u16 data_len)
5017 {
5018 struct mgmt_cp_get_device_flags *cp = data;
5019 struct mgmt_rp_get_device_flags rp;
5020 struct bdaddr_list_with_flags *br_params;
5021 struct hci_conn_params *params;
5022 u32 supported_flags;
5023 u32 current_flags = 0;
5024 u8 status = MGMT_STATUS_INVALID_PARAMS;
5025
5026 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
5027 &cp->addr.bdaddr, cp->addr.type);
5028
5029 hci_dev_lock(hdev);
5030
5031 supported_flags = hdev->conn_flags;
5032
5033 memset(&rp, 0, sizeof(rp));
5034
5035 if (cp->addr.type == BDADDR_BREDR) {
5036 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5037 &cp->addr.bdaddr,
5038 cp->addr.type);
5039 if (!br_params)
5040 goto done;
5041
5042 current_flags = br_params->flags;
5043 } else {
5044 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5045 le_addr_type(cp->addr.type));
5046 if (!params)
5047 goto done;
5048
5049 supported_flags = get_params_flags(hdev, params);
5050 current_flags = params->flags;
5051 }
5052
5053 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5054 rp.addr.type = cp->addr.type;
5055 rp.supported_flags = cpu_to_le32(supported_flags);
5056 rp.current_flags = cpu_to_le32(current_flags);
5057
5058 status = MGMT_STATUS_SUCCESS;
5059
5060 done:
5061 hci_dev_unlock(hdev);
5062
5063 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5064 &rp, sizeof(rp));
5065 }
5066
5067 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5068 bdaddr_t *bdaddr, u8 bdaddr_type,
5069 u32 supported_flags, u32 current_flags)
5070 {
5071 struct mgmt_ev_device_flags_changed ev;
5072
5073 bacpy(&ev.addr.bdaddr, bdaddr);
5074 ev.addr.type = bdaddr_type;
5075 ev.supported_flags = cpu_to_le32(supported_flags);
5076 ev.current_flags = cpu_to_le32(current_flags);
5077
5078 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5079 }
5080
5081 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5082 u16 len)
5083 {
5084 struct mgmt_cp_set_device_flags *cp = data;
5085 struct bdaddr_list_with_flags *br_params;
5086 struct hci_conn_params *params;
5087 u8 status = MGMT_STATUS_INVALID_PARAMS;
5088 u32 supported_flags;
5089 u32 current_flags = __le32_to_cpu(cp->current_flags);
5090
5091 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5092 &cp->addr.bdaddr, cp->addr.type, current_flags);
5093
5094 /* We should take hci_dev_lock() earlier, since conn_flags can change */
5095 supported_flags = hdev->conn_flags;
5096
5097 if ((supported_flags | current_flags) != supported_flags) {
5098 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5099 current_flags, supported_flags);
5100 goto done;
5101 }
5102
5103 hci_dev_lock(hdev);
5104
5105 if (cp->addr.type == BDADDR_BREDR) {
5106 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5107 &cp->addr.bdaddr,
5108 cp->addr.type);
5109
5110 if (br_params) {
5111 br_params->flags = current_flags;
5112 status = MGMT_STATUS_SUCCESS;
5113 } else {
5114 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5115 &cp->addr.bdaddr, cp->addr.type);
5116 }
5117
5118 goto unlock;
5119 }
5120
5121 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5122 le_addr_type(cp->addr.type));
5123 if (!params) {
5124 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5125 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5126 goto unlock;
5127 }
5128
5129 supported_flags = get_params_flags(hdev, params);
5130
5131 if ((supported_flags | current_flags) != supported_flags) {
5132 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5133 current_flags, supported_flags);
5134 goto unlock;
5135 }
5136
5137 WRITE_ONCE(params->flags, current_flags);
5138 status = MGMT_STATUS_SUCCESS;
5139
5140 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5141 * has been set.
5142 */
5143 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5144 hci_update_passive_scan(hdev);
5145
5146 unlock:
5147 hci_dev_unlock(hdev);
5148
5149 done:
5150 if (status == MGMT_STATUS_SUCCESS)
5151 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5152 supported_flags, current_flags);
5153
5154 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5155 &cp->addr, sizeof(cp->addr));
5156 }
5157
5158 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5159 u16 handle)
5160 {
5161 struct mgmt_ev_adv_monitor_added ev;
5162
5163 ev.monitor_handle = cpu_to_le16(handle);
5164
5165 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5166 }
5167
5168 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5169 {
5170 struct mgmt_ev_adv_monitor_removed ev;
5171 struct mgmt_pending_cmd *cmd;
5172 struct sock *sk_skip = NULL;
5173 struct mgmt_cp_remove_adv_monitor *cp;
5174
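/* When a removal of a specific monitor is pending, skip the socket
 * that requested it: it learns the outcome from the command reply
 * instead of this event.
 */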
5175 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5176 if (cmd) {
5177 cp = cmd->param;
5178
5179 if (cp->monitor_handle)
5180 sk_skip = cmd->sk;
5181 }
5182
5183 ev.monitor_handle = cpu_to_le16(handle);
5184
5185 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5186 }
5187
5188 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5189 void *data, u16 len)
5190 {
5191 struct adv_monitor *monitor = NULL;
5192 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5193 int handle, err;
5194 size_t rp_size = 0;
5195 __u32 supported = 0;
5196 __u32 enabled = 0;
5197 __u16 num_handles = 0;
5198 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5199
5200 BT_DBG("request for %s", hdev->name);
5201
5202 hci_dev_lock(hdev);
5203
5204 if (msft_monitor_supported(hdev))
5205 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5206
5207 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5208 handles[num_handles++] = monitor->handle;
5209
5210 hci_dev_unlock(hdev);
5211
5212 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5213 rp = kmalloc(rp_size, GFP_KERNEL);
5214 if (!rp)
5215 return -ENOMEM;
5216
5217 /* All supported features are currently enabled */
5218 enabled = supported;
5219
5220 rp->supported_features = cpu_to_le32(supported);
5221 rp->enabled_features = cpu_to_le32(enabled);
5222 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5223 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5224 rp->num_handles = cpu_to_le16(num_handles);
5225 if (num_handles)
5226 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5227
5228 err = mgmt_cmd_complete(sk, hdev->id,
5229 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5230 MGMT_STATUS_SUCCESS, rp, rp_size);
5231
5232 kfree(rp);
5233
5234 return err;
5235 }
5236
5237 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5238 void *data, int status)
5239 {
5240 struct mgmt_rp_add_adv_patterns_monitor rp;
5241 struct mgmt_pending_cmd *cmd = data;
5242 struct adv_monitor *monitor = cmd->user_data;
5243
5244 hci_dev_lock(hdev);
5245
5246 rp.monitor_handle = cpu_to_le16(monitor->handle);
5247
5248 if (!status) {
5249 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5250 hdev->adv_monitors_cnt++;
5251 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5252 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5253 hci_update_passive_scan(hdev);
5254 }
5255
5256 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5257 mgmt_status(status), &rp, sizeof(rp));
5258 mgmt_pending_remove(cmd);
5259
5260 hci_dev_unlock(hdev);
5261 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5262 rp.monitor_handle, status);
5263 }
5264
5265 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5266 {
5267 struct mgmt_pending_cmd *cmd = data;
5268 struct adv_monitor *monitor = cmd->user_data;
5269
5270 return hci_add_adv_monitor(hdev, monitor);
5271 }
5272
5273 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5274 struct adv_monitor *m, u8 status,
5275 void *data, u16 len, u16 op)
5276 {
5277 struct mgmt_pending_cmd *cmd;
5278 int err;
5279
5280 hci_dev_lock(hdev);
5281
5282 if (status)
5283 goto unlock;
5284
5285 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5286 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5287 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5288 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5289 status = MGMT_STATUS_BUSY;
5290 goto unlock;
5291 }
5292
5293 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5294 if (!cmd) {
5295 status = MGMT_STATUS_NO_RESOURCES;
5296 goto unlock;
5297 }
5298
5299 cmd->user_data = m;
5300 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5301 mgmt_add_adv_patterns_monitor_complete);
5302 if (err) {
5303 if (err == -ENOMEM)
5304 status = MGMT_STATUS_NO_RESOURCES;
5305 else
5306 status = MGMT_STATUS_FAILED;
5307
5308 goto unlock;
5309 }
5310
5311 hci_dev_unlock(hdev);
5312
5313 return 0;
5314
5315 unlock:
5316 hci_free_adv_monitor(hdev, m);
5317 hci_dev_unlock(hdev);
5318 return mgmt_cmd_status(sk, hdev->id, op, status);
5319 }
5320
5321 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5322 struct mgmt_adv_rssi_thresholds *rssi)
5323 {
5324 if (rssi) {
5325 m->rssi.low_threshold = rssi->low_threshold;
5326 m->rssi.low_threshold_timeout =
5327 __le16_to_cpu(rssi->low_threshold_timeout);
5328 m->rssi.high_threshold = rssi->high_threshold;
5329 m->rssi.high_threshold_timeout =
5330 __le16_to_cpu(rssi->high_threshold_timeout);
5331 m->rssi.sampling_period = rssi->sampling_period;
5332 } else {
5333 /* Default values. These numbers are the least constraining
5334  * parameters for the MSFT API to work, so it behaves as if
5335  * there were no RSSI parameters to consider. May need to be
5336  * changed if other APIs are to be supported.
5337  */
5338 m->rssi.low_threshold = -127;
5339 m->rssi.low_threshold_timeout = 60;
5340 m->rssi.high_threshold = -127;
5341 m->rssi.high_threshold_timeout = 0;
5342 m->rssi.sampling_period = 0;
5343 }
5344 }
5345
5346 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5347 struct mgmt_adv_pattern *patterns)
5348 {
5349 u8 offset = 0, length = 0;
5350 struct adv_pattern *p = NULL;
5351 int i;
5352
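/* Every pattern must fit entirely within the maximum extended
 * advertising data length.
 */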
5353 for (i = 0; i < pattern_count; i++) {
5354 offset = patterns[i].offset;
5355 length = patterns[i].length;
5356 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5357 length > HCI_MAX_EXT_AD_LENGTH ||
5358 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5359 return MGMT_STATUS_INVALID_PARAMS;
5360
5361 p = kmalloc(sizeof(*p), GFP_KERNEL);
5362 if (!p)
5363 return MGMT_STATUS_NO_RESOURCES;
5364
5365 p->ad_type = patterns[i].ad_type;
5366 p->offset = patterns[i].offset;
5367 p->length = patterns[i].length;
5368 memcpy(p->value, patterns[i].value, p->length);
5369
5370 INIT_LIST_HEAD(&p->list);
5371 list_add(&p->list, &m->patterns);
5372 }
5373
5374 return MGMT_STATUS_SUCCESS;
5375 }
5376
5377 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5378 void *data, u16 len)
5379 {
5380 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5381 struct adv_monitor *m = NULL;
5382 u8 status = MGMT_STATUS_SUCCESS;
5383 size_t expected_size = sizeof(*cp);
5384
5385 BT_DBG("request for %s", hdev->name);
5386
5387 if (len <= sizeof(*cp)) {
5388 status = MGMT_STATUS_INVALID_PARAMS;
5389 goto done;
5390 }
5391
5392 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5393 if (len != expected_size) {
5394 status = MGMT_STATUS_INVALID_PARAMS;
5395 goto done;
5396 }
5397
5398 m = kzalloc(sizeof(*m), GFP_KERNEL);
5399 if (!m) {
5400 status = MGMT_STATUS_NO_RESOURCES;
5401 goto done;
5402 }
5403
5404 INIT_LIST_HEAD(&m->patterns);
5405
5406 parse_adv_monitor_rssi(m, NULL);
5407 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5408
5409 done:
5410 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5411 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5412 }
5413
5414 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5415 void *data, u16 len)
5416 {
5417 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5418 struct adv_monitor *m = NULL;
5419 u8 status = MGMT_STATUS_SUCCESS;
5420 size_t expected_size = sizeof(*cp);
5421
5422 BT_DBG("request for %s", hdev->name);
5423
5424 if (len <= sizeof(*cp)) {
5425 status = MGMT_STATUS_INVALID_PARAMS;
5426 goto done;
5427 }
5428
5429 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5430 if (len != expected_size) {
5431 status = MGMT_STATUS_INVALID_PARAMS;
5432 goto done;
5433 }
5434
5435 m = kzalloc(sizeof(*m), GFP_KERNEL);
5436 if (!m) {
5437 status = MGMT_STATUS_NO_RESOURCES;
5438 goto done;
5439 }
5440
5441 INIT_LIST_HEAD(&m->patterns);
5442
5443 parse_adv_monitor_rssi(m, &cp->rssi);
5444 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5445
5446 done:
5447 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5448 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5449 }
5450
5451 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5452 void *data, int status)
5453 {
5454 struct mgmt_rp_remove_adv_monitor rp;
5455 struct mgmt_pending_cmd *cmd = data;
5456 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5457
5458 hci_dev_lock(hdev);
5459
5460 rp.monitor_handle = cp->monitor_handle;
5461
5462 if (!status)
5463 hci_update_passive_scan(hdev);
5464
5465 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5466 mgmt_status(status), &rp, sizeof(rp));
5467 mgmt_pending_remove(cmd);
5468
5469 hci_dev_unlock(hdev);
5470 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5471 rp.monitor_handle, status);
5472 }
5473
5474 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5475 {
5476 struct mgmt_pending_cmd *cmd = data;
5477 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5478 u16 handle = __le16_to_cpu(cp->monitor_handle);
5479
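/* A monitor handle of zero means "remove all monitors". */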
5480 if (!handle)
5481 return hci_remove_all_adv_monitor(hdev);
5482
5483 return hci_remove_single_adv_monitor(hdev, handle);
5484 }
5485
5486 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5487 void *data, u16 len)
5488 {
5489 struct mgmt_pending_cmd *cmd;
5490 int err, status;
5491
5492 hci_dev_lock(hdev);
5493
5494 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5495 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5496 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5497 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5498 status = MGMT_STATUS_BUSY;
5499 goto unlock;
5500 }
5501
5502 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5503 if (!cmd) {
5504 status = MGMT_STATUS_NO_RESOURCES;
5505 goto unlock;
5506 }
5507
5508 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5509 mgmt_remove_adv_monitor_complete);
5510
5511 if (err) {
5512 mgmt_pending_remove(cmd);
5513
5514 if (err == -ENOMEM)
5515 status = MGMT_STATUS_NO_RESOURCES;
5516 else
5517 status = MGMT_STATUS_FAILED;
5518
5519 goto unlock;
5520 }
5521
5522 hci_dev_unlock(hdev);
5523
5524 return 0;
5525
5526 unlock:
5527 hci_dev_unlock(hdev);
5528 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5529 status);
5530 }
5531
5532 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5533 {
5534 struct mgmt_rp_read_local_oob_data mgmt_rp;
5535 size_t rp_size = sizeof(mgmt_rp);
5536 struct mgmt_pending_cmd *cmd = data;
5537 struct sk_buff *skb = cmd->skb;
5538 u8 status = mgmt_status(err);
5539
5540 if (!status) {
5541 if (!skb)
5542 status = MGMT_STATUS_FAILED;
5543 else if (IS_ERR(skb))
5544 status = mgmt_status(PTR_ERR(skb));
5545 else
5546 status = mgmt_status(skb->data[0]);
5547 }
5548
5549 bt_dev_dbg(hdev, "status %d", status);
5550
5551 if (status) {
5552 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5553 goto remove;
5554 }
5555
5556 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5557
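/* Without BR/EDR Secure Connections only the P-192 hash and
 * randomizer are available, so trim the P-256 fields from the
 * reply.
 */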
5558 if (!bredr_sc_enabled(hdev)) {
5559 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5560
5561 if (skb->len < sizeof(*rp)) {
5562 mgmt_cmd_status(cmd->sk, hdev->id,
5563 MGMT_OP_READ_LOCAL_OOB_DATA,
5564 MGMT_STATUS_FAILED);
5565 goto remove;
5566 }
5567
5568 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5569 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5570
5571 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5572 } else {
5573 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5574
5575 if (skb->len < sizeof(*rp)) {
5576 mgmt_cmd_status(cmd->sk, hdev->id,
5577 MGMT_OP_READ_LOCAL_OOB_DATA,
5578 MGMT_STATUS_FAILED);
5579 goto remove;
5580 }
5581
5582 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5583 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5584
5585 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5586 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5587 }
5588
5589 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5590 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5591
5592 remove:
5593 if (skb && !IS_ERR(skb))
5594 kfree_skb(skb);
5595
5596 mgmt_pending_free(cmd);
5597 }
5598
5599 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5600 {
5601 struct mgmt_pending_cmd *cmd = data;
5602
5603 if (bredr_sc_enabled(hdev))
5604 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5605 else
5606 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5607
5608 if (IS_ERR(cmd->skb))
5609 return PTR_ERR(cmd->skb);
5610 else
5611 return 0;
5612 }
5613
5614 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5615 void *data, u16 data_len)
5616 {
5617 struct mgmt_pending_cmd *cmd;
5618 int err;
5619
5620 bt_dev_dbg(hdev, "sock %p", sk);
5621
5622 hci_dev_lock(hdev);
5623
5624 if (!hdev_is_powered(hdev)) {
5625 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5626 MGMT_STATUS_NOT_POWERED);
5627 goto unlock;
5628 }
5629
5630 if (!lmp_ssp_capable(hdev)) {
5631 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5632 MGMT_STATUS_NOT_SUPPORTED);
5633 goto unlock;
5634 }
5635
5636 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5637 if (!cmd)
5638 err = -ENOMEM;
5639 else
5640 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5641 read_local_oob_data_complete);
5642
5643 if (err < 0) {
5644 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5645 MGMT_STATUS_FAILED);
5646
5647 if (cmd)
5648 mgmt_pending_free(cmd);
5649 }
5650
5651 unlock:
5652 hci_dev_unlock(hdev);
5653 return err;
5654 }
5655
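/* Add Remote OOB Data comes in two sizes: the legacy form carrying only
 * the P-192 hash/randomizer pair, and the extended form carrying both
 * P-192 and P-256 values. Zero-valued pairs are treated as "no data"
 * for that generation, and for LE the P-192 values must be zero since
 * legacy SMP OOB is not implemented.
 */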
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

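/* Validate a requested discovery type against the controller features,
 * returning through mgmt_status the status code to report when the type
 * cannot be served: LE and interleaved discovery need LE support, and
 * interleaved and BR/EDR discovery need BR/EDR support.
 */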
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
				    uint8_t *mgmt_status)
{
	switch (type) {
	case DISCOV_TYPE_LE:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	case DISCOV_TYPE_INTERLEAVED:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		fallthrough;
	case DISCOV_TYPE_BREDR:
		*mgmt_status = mgmt_bredr_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	default:
		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}

static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
				DISCOVERY_FINDING);
}

static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}

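/* Common handler for Start Discovery and Start Limited Discovery: the
 * two commands differ only in the opcode and in whether the limited
 * discovery flag is set before the discovery procedure is queued via
 * hci_cmd_sync_queue().
 */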
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}

static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}

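/* Start Service Discovery is a variable-length command: the fixed
 * header is followed by uuid_count 128-bit UUIDs used for result
 * filtering, so the command length must be validated against
 * uuid_count before the UUID list is copied.
 */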
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}

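/* Stop Discovery is only accepted while discovery is active and the
 * requested type matches the one currently running; the actual stop is
 * performed asynchronously and DISCOVERY_STOPPED is entered from the
 * completion handler above.
 */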
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_FAILED;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}

static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}

static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
	else
		bt_dev_dbg(hdev, "status %d", err);
}

static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}

static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}

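/* Set Advertising accepts 0x00 (off), 0x01 (on) and 0x02 (connectable).
 * When no HCI communication is needed (powered off, no effective
 * change, mesh enabled, LE connections present, or an active LE scan)
 * only the flags are toggled and the new settings are sent directly to
 * user space.
 */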
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}

static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}

static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}

static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}

static int set_bredr_sync(struct hci_dev *hdev, void *data)
{
	int status;

	status = hci_write_fast_connectable_sync(hdev, false);

	if (!status)
		status = hci_update_scan_sync(hdev);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	if (!status)
		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);

	return status;
}

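/* Set BR/EDR toggles the BR/EDR transport on a dual-mode controller.
 * Disabling it while powered on is rejected, as is re-enabling it when
 * the controller is configured for LE-only operation with a static
 * address or with secure connections enabled.
 */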
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions apply when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}

static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}

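/* Set Secure Connections accepts 0x00 (off), 0x01 (on) and 0x02
 * (SC-only mode). When the setting cannot reach the controller (powered
 * off, no SC support or BR/EDR disabled) the flags are updated locally
 * and the result is reported without any HCI traffic.
 */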
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;

		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

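/* Set Privacy loads the local IRK and selects between off (0x00),
 * full privacy (0x01) and limited privacy (0x02). The command is only
 * accepted while the controller is powered off.
 */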
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

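/* Load IRKs replaces the whole set of stored Identity Resolving Keys.
 * The command length is validated against irk_count, every entry must
 * carry a valid identity address, and blocked keys are skipped.
 */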
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->initiator != 0x00 && key->initiator != 0x01)
		return false;

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

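/* Load Long Term Keys replaces the whole set of stored LTKs. Entries
 * with a blocked key value or an invalid address/initiator field are
 * skipped, as are P-256 debug keys and unknown key types.
 */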
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (!ltk_is_valid(key)) {
			bt_dev_warn(hdev, "Invalid LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16 handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links the TX power does not change, thus we don't need to
	 * query for it once the value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}

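/* Get Connection Information returns RSSI and TX power for an active
 * connection. Cached values are reused for a randomized interval
 * between conn_info_min_age and conn_info_max_age to keep user space
 * from polling the controller too aggressively.
 */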
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid the client trying to guess when to poll again, calculate
	 * the conn info age as a random value between the min/max set in
	 * hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or
	 * were never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}

static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}

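/* Get Clock Information returns the local clock and, when a BR/EDR
 * connection address is given, the piconet clock and its accuracy as
 * read from the controller.
 */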
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to the device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}

static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;
	ev.action = action;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

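/* MGMT_OP_ADD_DEVICE: action 0x01 on a BR/EDR address adds the device to
 * the accept list; for LE addresses the action selects the auto-connect
 * policy (0x00 report, 0x01 direct, 0x02 always) and the passive scan is
 * then refreshed via the cmd_sync queue.
 */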
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only the incoming-connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

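/* MGMT_OP_REMOVE_DEVICE: a specific address removes one accept list
 * entry or one set of LE connection parameters; BDADDR_ANY (type 0)
 * clears the whole accept list and every LE parameter entry except
 * disabled ones and those still needed for an explicit connect.
 */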
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

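/* MGMT_OP_LOAD_CONN_PARAM: replaces the stored LE connection parameters
 * with the supplied list. Entries with an invalid address type or
 * out-of-range interval/latency/timeout values are skipped rather than
 * failing the whole command.
 */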
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}

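/* MGMT_OP_SET_EXTERNAL_CONFIG: toggles HCI_EXT_CONFIGURED on controllers
 * with the external-configuration quirk and, when the configured state
 * actually changes, re-announces the index in its new configured or
 * unconfigured role.
 */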
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

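/* MGMT_OP_SET_PUBLIC_ADDRESS: only valid while powered off and when the
 * driver provides a set_bdaddr callback. Setting the address on an
 * unconfigured controller may complete its configuration and trigger
 * power-on.
 */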
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

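/* Completion handler for the OOB data request issued by
 * read_local_ssp_oob_req(): converts the controller reply (P-192 only,
 * or P-192 plus P-256 when BR/EDR Secure Connections is enabled) into
 * EIR-formatted hash and randomizer fields.
 */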
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}

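/* Queues the HCI request that reads the local SSP OOB data; the reply is
 * handled by read_local_oob_ext_data_complete() above.
 */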
static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
				  struct mgmt_cp_read_local_oob_ext_data *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
			       cp, sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
				 read_local_oob_ext_data_complete);

	if (err < 0) {
		mgmt_pending_remove(cmd);
		return err;
	}

	return 0;
}

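/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA: for BR/EDR the data comes from the
 * controller via an HCI request; for LE it is assembled locally from the
 * SMP OOB generator, the local address, role and flags.
 */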
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}

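/* Advertising flags this kernel can honour for Add Advertising and
 * Add Ext Adv Params, including the PHY and TX power flags that are only
 * available with extended advertising controllers.
 */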
static u32 get_supported_adv_flags(struct hci_dev *hdev)
{
	u32 flags = 0;

	flags |= MGMT_ADV_FLAG_CONNECTABLE;
	flags |= MGMT_ADV_FLAG_DISCOV;
	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
	flags |= MGMT_ADV_FLAG_APPEARANCE;
	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
	flags |= MGMT_ADV_PARAM_DURATION;
	flags |= MGMT_ADV_PARAM_TIMEOUT;
	flags |= MGMT_ADV_PARAM_INTERVALS;
	flags |= MGMT_ADV_PARAM_TX_POWER;
	flags |= MGMT_ADV_PARAM_SCAN_RSP;

	/* With extended advertising, the TX_POWER returned from
	 * Set Adv Params will always be valid.
	 */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
		flags |= MGMT_ADV_FLAG_TX_POWER;

	if (ext_adv_capable(hdev)) {
		flags |= MGMT_ADV_FLAG_SEC_1M;
		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;

		if (le_2m_capable(hdev))
			flags |= MGMT_ADV_FLAG_SEC_2M;

		if (le_coded_capable(hdev))
			flags |= MGMT_ADV_FLAG_SEC_CODED;
	}

	return flags;
}

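/* MGMT_OP_READ_ADV_FEATURES: reports the supported flags, data size
 * limits and the list of currently registered advertising instances.
 */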
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}

static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}

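/* Space left for caller-supplied TLVs once the fields the kernel appends
 * itself (flags, TX power, local name, appearance) are accounted for.
 */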
static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
			   bool is_adv_data)
{
	u8 max_len = max_adv_len(hdev);

	if (is_adv_data) {
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS))
			max_len -= 3;

		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
			max_len -= 3;
	} else {
		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
			max_len -= calculate_name_len(hdev);

		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
			max_len -= 4;
	}

	return max_len;
}

static bool flags_managed(u32 adv_flags)
{
	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
			    MGMT_ADV_FLAG_LIMITED_DISCOV |
			    MGMT_ADV_FLAG_MANAGED_FLAGS);
}

static bool tx_power_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
}

static bool name_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
}

static bool appearance_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
}

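/* Walks the length-prefixed TLV structure and rejects data that is too
 * long, malformed, or that tries to set fields the kernel manages itself
 * for the given advertising flags.
 */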
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
			return false;
	}

	return true;
}

static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
{
	u32 supported_flags, phy_flags;

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
	if (adv_flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return false;

	return true;
}

static bool adv_busy(struct hci_dev *hdev)
{
	return pending_find(MGMT_OP_SET_LE, hdev);
}

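/* Common cleanup once an Add Advertising request has settled: clear the
 * pending state on success, or remove the instances that never made it
 * to the controller on failure.
 */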
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		instance = adv->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);
}

static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	mgmt_pending_free(cmd);
}

static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

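/* MGMT_OP_ADD_ADVERTISING: registers (or overwrites) an instance's data
 * and, when the controller is powered and software rotation allows it,
 * schedules the instance via the cmd_sync queue.
 */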
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure.
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}

static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}

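/* MGMT_OP_ADD_EXT_ADV_PARAMS: first half of the two-call extended
 * advertising interface. Creates the instance with parameters but no
 * data; the data is supplied afterwards by Add Ext Adv Data.
 */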
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	int err;

	if (ext_adv_capable(hdev)) {
		err = hci_update_adv_data_sync(hdev, cp->instance);
		if (err)
			return err;

		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
		if (err)
			return err;

		return hci_enable_ext_advertising_sync(hdev, cp->instance);
	}

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

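/* MGMT_OP_ADD_EXT_ADV_DATA: second half of the extended advertising
 * interface. Attaches advertising and scan response data to an instance
 * created by Add Ext Adv Params and schedules it.
 */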
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_ext_adv_data_complete().
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void remove_advertising_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	struct mgmt_rp_remove_advertising rp;

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int remove_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	int err;

	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
	if (err)
		return err;

	if (list_empty(&hdev->adv_instances))
		err = hci_disable_advertising_sync(hdev);

	return err;
}

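/* MGMT_OP_REMOVE_ADVERTISING: instance 0 removes all instances;
 * advertising is disabled entirely once no instance remains.
 */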
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

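/* MGMT_OP_GET_ADV_SIZE_INFO: purely informational; reports how much
 * advertising and scan response data would fit for the given flags
 * without registering anything.
 */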
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}

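/* Command handler table, indexed by MGMT opcode. Each entry pairs a
 * handler with its expected parameter size (a minimum when
 * HCI_MGMT_VAR_LEN is set) and the socket permission flags.
 */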
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,          MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,     MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};

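/* Index lifecycle notifications: legacy sockets receive the plain (or
 * unconfigured) index events, while extended sockets receive
 * MGMT_EV_EXT_INDEX_* carrying the bus and controller type.
 */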
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}

mgmt_power_on(struct hci_dev * hdev,int err)9396 void mgmt_power_on(struct hci_dev *hdev, int err)
9397 {
9398 struct cmd_lookup match = { NULL, hdev };
9399
9400 bt_dev_dbg(hdev, "err %d", err);
9401
9402 hci_dev_lock(hdev);
9403
9404 if (!err) {
9405 restart_le_actions(hdev);
9406 hci_update_passive_scan(hdev);
9407 }
9408
9409 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9410
9411 new_settings(hdev, match.sk);
9412
9413 if (match.sk)
9414 sock_put(match.sk);
9415
9416 hci_dev_unlock(hdev);
9417 }
9418
__mgmt_power_off(struct hci_dev * hdev)9419 void __mgmt_power_off(struct hci_dev *hdev)
9420 {
9421 struct cmd_lookup match = { NULL, hdev };
9422 u8 zero_cod[] = { 0, 0, 0 };
9423
9424 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9425
	/* If the power off is because of hdev unregistration use the
	 * appropriate INVALID_INDEX status. Otherwise use NOT_POWERED.
	 * We cover both scenarios here since later in mgmt_index_removed()
	 * any hci_conn callbacks will have already been triggered,
	 * potentially causing misleading DISCONNECTED status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}
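
/* The SMP-level key type is folded into the smaller set of MGMT LTK types
 * exposed to userspace:
 *
 *	SMP_LTK / SMP_LTK_RESPONDER  ->  MGMT_LTK_[UN]AUTHENTICATED
 *	SMP_LTK_P256                 ->  MGMT_LTK_P256_[UN]AUTH
 *	SMP_LTK_P256_DEBUG           ->  MGMT_LTK_P256_DEBUG
 *
 * Only the pairing generation (legacy vs LE Secure Connections P-256)
 * and whether the pairing was authenticated (MITM protected) survive
 * the translation.
 */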

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * long term keys stored. Their addresses will change the next
	 * time around.
	 *
	 * Only when a remote device provides an identity address should
	 * the long term key be stored. If the remote identity is known,
	 * the long term keys are internally mapped to the identity
	 * address. So allow static random and public addresses here.
	 */
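	/* A static random address has its two most significant bits set
	 * (0b11xxxxxx in the top byte, stored in b[5]), which is what the
	 * 0xc0 mask below tests for; anything else in the random range is
	 * a resolvable or non-resolvable private address.
	 */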
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * signature resolving keys stored. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address should
	 * the signature resolving key be stored. So allow static random
	 * and public addresses here.
	 */
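	/* Same check as in mgmt_new_ltk(): the 0xc0 mask tests whether
	 * the random address is a static one.
	 */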
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate a buffer for the LE or BR/EDR adv data */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) +
				     (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class,
						    sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

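/* Returns true when a mgmt-initiated power down is in progress, i.e. a
 * Set Powered command with val == 0 is still pending for this controller.
 */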
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
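
/* EIR and advertising data are a sequence of length-prefixed fields:
 * a length byte (covering the type byte plus payload), a type byte and
 * the payload.  A worked example with one 16-bit Service UUID field
 * (0x110b, Audio Sink):
 *
 *	03 03 0b 11
 *	 |  |  `--`---- UUID 0x110b, little endian
 *	 |  `---------- field type EIR_UUID16_ALL (0x03)
 *	 `------------- field length: 3 bytes follow
 *
 * eir_has_uuids() above expands such 16- and 32-bit UUIDs to full
 * 128-bit UUIDs on top of bluetooth_base_uuid before comparing them
 * against the discovery filter.
 */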

static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

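	/* Only restart when the remaining scan window is long enough for
	 * the restart to matter: if the scan would already have run its
	 * full duration DISCOV_LE_RESTART_DELAY from now, skip it.
	 */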
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitors
	 *
	 * For cases 1 and 2, report all advertisements via
	 * MGMT_EV_DEVICE_FOUND and report ONLY one advertisement per device
	 * for the matched Monitor via MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 *
	 * For case 3, since we are not actively scanning and all
	 * advertisements received are due to a matched Advertisement Monitor,
	 * report all advertisements ONLY via
	 * MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not actively scanning and
		 * this is a subsequent advertisement report for an already
		 * matched Advertisement Monitor, or that the controller
		 * offloading support is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for discovery that was not initiated by the
	 * kernel. The one LE exception is pend_le_reports > 0, in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					    name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
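
/* hci_sock.c routes messages sent on an HCI_CHANNEL_CONTROL socket to
 * this channel, dispatching them through the mgmt_handlers table above
 * based on the opcode in the mgmt packet header.
 */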

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}
