/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	22

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

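/* Example mappings, for reference: mgmt_status() accepts both negative
 * kernel errno values and positive HCI status codes, e.g.:
 *
 *   mgmt_status(-EBUSY)  ->  MGMT_STATUS_BUSY (via mgmt_errno_status())
 *   mgmt_status(0x04)    ->  MGMT_STATUS_CONNECT_FAILED (HCI Page Timeout)
 *   mgmt_status(0xff)    ->  MGMT_STATUS_FAILED (outside mgmt_status_table)
 */
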
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

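/* Note: MGMT address types are BDADDR_BREDR, BDADDR_LE_PUBLIC and
 * BDADDR_LE_RANDOM; this helper is only meaningful for the LE types,
 * and anything other than public is treated as a random address.
 */
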
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

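/* Reply layout, for reference: rp->opcodes is a single array of __le16
 * values holding num_commands command opcodes followed by num_events event
 * opcodes, so the reply size is sizeof(*rp) plus two bytes per entry. The
 * trusted/untrusted check above decides which tables are copied out.
 */
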
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

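/* Entry types in the extended index list, as used above: 0x00 marks a
 * configured (primary) controller and 0x01 an unconfigured one; raw-only
 * and user-channel devices are skipped entirely rather than given a type.
 */
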
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

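/* For reference, the two option bits mirror is_configured(): a controller
 * that needs external configuration but has not been configured yet reports
 * MGMT_OPTION_EXTERNAL_CONFIG as missing, and one that requires a public
 * address (invalid-BDADDR or address-from-property quirk) but has none set
 * reports MGMT_OPTION_PUBLIC_ADDRESS as missing.
 */
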
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

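/* The masked-out PHYs are the ones every controller has to support: per the
 * core spec the BR 1M 1-slot packets (DM1/DH1) and, on LE, the 1M PHY are
 * mandatory, so only the remaining PHYs can be toggled through Set PHY
 * Configuration.
 */
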
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * the address is actually in use decides if the flag is set or
	 * not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}

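/* Both settings words travel as a __le32 bitmask: get_supported_settings()
 * fills the supported_settings field of the Read Info replies, while
 * get_current_settings() feeds current_settings and the New Settings event,
 * so userspace can diff the two masks to see what can still be toggled.
 */
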
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

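/* Both helpers above follow the same pattern: a still-pending Set
 * Discoverable/Set Connectable command carries the value that is about to
 * take effect, so advertising data and scan parameters built while the
 * command is in flight already reflect the requested state instead of the
 * soon-to-be-stale flags.
 */
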
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable-advertising path below
	 * (hci_start_ext_adv_sync() or hci_enable_advertising_sync()).
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);

static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

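/* Mesh TX flow, for reference: when the send window closes, mesh_send_done()
 * queues mesh_send_done_sync(), which clears HCI_MESH_SENDING, stops
 * advertising and completes the head-of-queue packet. The mesh_next()
 * callback then schedules mesh_send_sync() for the next queued packet,
 * setting HCI_MESH_SENDING again, or completes that packet immediately if
 * queueing fails.
 */
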
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);

		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}

static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);

			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

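/* Summary of the checks above: val may be 0x00 (off), 0x01 (general) or
 * 0x02 (limited); disabling requires timeout == 0 and limited mode requires
 * timeout != 0. A timeout is only accepted while powered, and the command
 * is rejected while not connectable or while advertising is paused. When
 * only the timeout changes, it is rearmed without any HCI traffic.
 */
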
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	if (cmd)
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}

static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}

static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}

static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

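/* Handler for MGMT_OP_SET_LINK_SECURITY. Translates the requested mode
 * into an HCI_OP_WRITE_AUTH_ENABLE command when the controller is
 * powered; otherwise only the HCI_LINK_SECURITY flag is toggled.
 */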
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}

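/* Write the requested Secure Simple Pairing mode to the controller.
 * The HCI_SSP_ENABLED flag is set optimistically before the write and
 * cleared again on success so that set_ssp_complete() observes the
 * transition and emits the New Settings event.
 */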
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}

static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	bt_dev_dbg(hdev, "sock %p", sk);

	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
			       MGMT_STATUS_NOT_SUPPORTED);
}

static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

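/* Enable or disable LE support on the controller. Disabling also tears
 * down any active advertising instances, while enabling refreshes the
 * advertising and scan response data and the passive scan state.
 */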
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}

static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}

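/* Apply the Mesh Receiver settings: toggle the HCI_MESH flag, copy the
 * requested AD type filters (forwarding all advertisements when the
 * filter list does not fit) and refresh passive scanning.
 */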
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}

static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}

static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}

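/* Transmit a mesh packet by creating a short-lived advertising
 * instance one past the controller's advertising set count and
 * scheduling it, unless all advertising sets are already in use.
 */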
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other instance
		 * is active, or let it be scheduled naturally from the queue
		 * if advertising is already in progress.
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}

static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
{
	struct mgmt_rp_mesh_read_features *rp = data;

	if (rp->used_handles >= rp->max_handles)
		return;

	rp->handles[rp->used_handles++] = mesh_tx->handle;
}

static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}

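/* Cancel either all outstanding mesh transmissions owned by the
 * requesting socket (handle 0) or the single transmission matching the
 * given handle.
 */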
static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}

static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}

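/* Handler for MGMT_OP_MESH_SEND. Validates the payload size, makes sure
 * a transmit handle is still available, queues the transmission and
 * reports the assigned handle back to the caller.
 */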
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}

static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single-mode LE-only controllers, and dual-mode
	 * controllers configured as LE-only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR previously switched off.
	 *
	 * When trying to enable LE while it is already enabled,
	 * gracefully send a positive response. Trying to disable it,
	 * however, results in rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

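/* Determine whether a UUID can be encoded in 16, 32 or 128 bits: any
 * UUID not derived from the Bluetooth base UUID needs the full 128
 * bits, otherwise the size depends on the value of the top 32 bits.
 */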
static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}

static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}

static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}

static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID doesn't require the adapter to be up and
	 * running, so use hci_cmd_sync_submit instead of
	 * hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}

static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}

static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up and
	 * running, so use hci_cmd_sync_submit instead of
	 * hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_class_sync(struct hci_dev *hdev, void *data)
{
	int err = 0;

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		cancel_delayed_work_sync(&hdev->service_cache);
		err = hci_update_eir_sync(hdev);
	}

	if (err)
		return err;

	return hci_update_class_sync(hdev);
}

static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up and
	 * running, so use hci_cmd_sync_submit instead of
	 * hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

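/* Handler for MGMT_OP_LOAD_LINK_KEYS. Replaces the entire stored link
 * key list, skipping blocked keys, keys with an invalid address or key
 * type, and debug combination keys.
 */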
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (key->addr.type != BDADDR_BREDR) {
			bt_dev_warn(hdev,
				    "Invalid link address type %u for %pMR",
				    key->addr.type, &key->addr.bdaddr);
			continue;
		}

		if (key->type > 0x08) {
			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
				    key->type, &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}

static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}

static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}

static int unpair_device_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct hci_conn *conn;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn)
		return 0;

	/* Disregard any possible error since the likes of hci_abort_conn_sync
	 * will clean up the connection no matter the error.
	 */
	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

	return 0;
}

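/* Handler for MGMT_OP_UNPAIR_DEVICE. Removes the stored keys for the
 * given address and, if requested and a connection exists, terminates
 * the link before signalling completion.
 */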
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a re-pairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	cmd->cmd_complete(cmd, mgmt_status(err));
	mgmt_pending_free(cmd);
}

static int disconnect_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct hci_conn *conn;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn)
		return -ENOTCONN;

	/* Disregard any possible error since the likes of hci_abort_conn_sync
	 * will clean up the connection no matter the error.
	 */
	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

	return 0;
}

static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
				 disconnect_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

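/* Map an HCI link type and address type onto the corresponding MGMT
 * BDADDR_* address type.
 */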
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case ISO_LINK:
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}

static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}

static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

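/* Finalize a pending MGMT_OP_PAIR_DEVICE command: send the response,
 * detach the confirmation callbacks and drop the references taken on
 * the connection.
 */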
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}

void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct mgmt_pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

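/* Handler for MGMT_OP_PAIR_DEVICE. Establishes an ACL or LE connection
 * as needed and attaches pairing callbacks that complete the command
 * once the security procedure has finished.
 */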
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

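/* Common helper for the user confirmation and passkey (negative) reply
 * commands: LE responses are routed through SMP, while BR/EDR responses
 * are sent as the corresponding HCI command.
 */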
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}

static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}

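/* Advance to the next advertising instance if the current one carries
 * any of the given flags, so that stale data (e.g. local name or
 * appearance) is not left on air after a change.
 */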
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}

name_changed_sync(struct hci_dev * hdev,void * data)3765 static int name_changed_sync(struct hci_dev *hdev, void *data)
3766 {
3767 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3768 }
3769
set_name_complete(struct hci_dev * hdev,void * data,int err)3770 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3771 {
3772 struct mgmt_pending_cmd *cmd = data;
3773 struct mgmt_cp_set_local_name *cp = cmd->param;
3774 u8 status = mgmt_status(err);
3775
3776 bt_dev_dbg(hdev, "err %d", err);
3777
3778 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3779 return;
3780
3781 if (status) {
3782 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3783 status);
3784 } else {
3785 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3786 cp, sizeof(*cp));
3787
3788 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3789 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3790 }
3791
3792 mgmt_pending_remove(cmd);
3793 }
3794
3795 static int set_name_sync(struct hci_dev *hdev, void *data)
3796 {
3797 if (lmp_bredr_capable(hdev)) {
3798 hci_update_name_sync(hdev);
3799 hci_update_eir_sync(hdev);
3800 }
3801
3802 /* The name is stored in the scan response data, so there is
3803 * no need to update the advertising data here.
3804 */
3805 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3806 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3807
3808 return 0;
3809 }
3810
3811 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3812 u16 len)
3813 {
3814 struct mgmt_cp_set_local_name *cp = data;
3815 struct mgmt_pending_cmd *cmd;
3816 int err;
3817
3818 bt_dev_dbg(hdev, "sock %p", sk);
3819
3820 hci_dev_lock(hdev);
3821
3822 /* If the old values are the same as the new ones, just return a
3823 * direct command complete event.
3824 */
3825 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3826 !memcmp(hdev->short_name, cp->short_name,
3827 sizeof(hdev->short_name))) {
3828 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3829 data, len);
3830 goto failed;
3831 }
3832
3833 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3834
3835 if (!hdev_is_powered(hdev)) {
3836 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3837
3838 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3839 data, len);
3840 if (err < 0)
3841 goto failed;
3842
3843 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3844 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3845 ext_info_changed(hdev, sk);
3846
3847 goto failed;
3848 }
3849
3850 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3851 if (!cmd)
3852 err = -ENOMEM;
3853 else
3854 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3855 set_name_complete);
3856
3857 if (err < 0) {
3858 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3859 MGMT_STATUS_FAILED);
3860
3861 if (cmd)
3862 mgmt_pending_remove(cmd);
3863
3864 goto failed;
3865 }
3866
3867 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3868
3869 failed:
3870 hci_dev_unlock(hdev);
3871 return err;
3872 }
3873
3874 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3875 {
3876 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3877 }
3878
3879 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3880 u16 len)
3881 {
3882 struct mgmt_cp_set_appearance *cp = data;
3883 u16 appearance;
3884 int err;
3885
3886 bt_dev_dbg(hdev, "sock %p", sk);
3887
3888 if (!lmp_le_capable(hdev))
3889 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3890 MGMT_STATUS_NOT_SUPPORTED);
3891
3892 appearance = le16_to_cpu(cp->appearance);
3893
3894 hci_dev_lock(hdev);
3895
3896 if (hdev->appearance != appearance) {
3897 hdev->appearance = appearance;
3898
3899 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3900 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3901 NULL);
3902
3903 ext_info_changed(hdev, sk);
3904 }
3905
3906 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3907 0);
3908
3909 hci_dev_unlock(hdev);
3910
3911 return err;
3912 }
3913
3914 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3915 void *data, u16 len)
3916 {
3917 struct mgmt_rp_get_phy_configuration rp;
3918
3919 bt_dev_dbg(hdev, "sock %p", sk);
3920
3921 hci_dev_lock(hdev);
3922
3923 memset(&rp, 0, sizeof(rp));
3924
3925 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3926 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3927 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3928
3929 hci_dev_unlock(hdev);
3930
3931 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3932 &rp, sizeof(rp));
3933 }
3934
3935 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3936 {
3937 struct mgmt_ev_phy_configuration_changed ev;
3938
3939 memset(&ev, 0, sizeof(ev));
3940
3941 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3942
3943 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3944 sizeof(ev), skip);
3945 }
3946
3947 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3948 {
3949 struct mgmt_pending_cmd *cmd = data;
3950 struct sk_buff *skb = cmd->skb;
3951 u8 status = mgmt_status(err);
3952
3953 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3954 return;
3955
3956 if (!status) {
3957 if (!skb)
3958 status = MGMT_STATUS_FAILED;
3959 else if (IS_ERR(skb))
3960 status = mgmt_status(PTR_ERR(skb));
3961 else
3962 status = mgmt_status(skb->data[0]);
3963 }
3964
3965 bt_dev_dbg(hdev, "status %d", status);
3966
3967 if (status) {
3968 mgmt_cmd_status(cmd->sk, hdev->id,
3969 MGMT_OP_SET_PHY_CONFIGURATION, status);
3970 } else {
3971 mgmt_cmd_complete(cmd->sk, hdev->id,
3972 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3973 NULL, 0);
3974
3975 mgmt_phy_configuration_changed(hdev, cmd->sk);
3976 }
3977
3978 if (skb && !IS_ERR(skb))
3979 kfree_skb(skb);
3980
3981 mgmt_pending_remove(cmd);
3982 }
3983
3984 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3985 {
3986 struct mgmt_pending_cmd *cmd = data;
3987 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3988 struct hci_cp_le_set_default_phy cp_phy;
3989 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3990
3991 memset(&cp_phy, 0, sizeof(cp_phy));
3992
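/* Map the MGMT PHY selection onto the HCI LE Set Default PHY command:
 * all_phys bit 0 (0x01) tells the controller the host has no TX
 * preference and bit 1 (0x02) no RX preference; otherwise tx_phys and
 * rx_phys list the allowed PHYs. For example, selected_phys =
 * MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_1M_RX yields
 * all_phys = 0x00, tx_phys = HCI_LE_SET_PHY_1M | HCI_LE_SET_PHY_2M
 * and rx_phys = HCI_LE_SET_PHY_1M.
 */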
3993 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3994 cp_phy.all_phys |= 0x01;
3995
3996 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3997 cp_phy.all_phys |= 0x02;
3998
3999 if (selected_phys & MGMT_PHY_LE_1M_TX)
4000 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4001
4002 if (selected_phys & MGMT_PHY_LE_2M_TX)
4003 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4004
4005 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4006 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4007
4008 if (selected_phys & MGMT_PHY_LE_1M_RX)
4009 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4010
4011 if (selected_phys & MGMT_PHY_LE_2M_RX)
4012 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4013
4014 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4015 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4016
4017 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4018 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4019
4020 return 0;
4021 }
4022
4023 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4024 void *data, u16 len)
4025 {
4026 struct mgmt_cp_set_phy_configuration *cp = data;
4027 struct mgmt_pending_cmd *cmd;
4028 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4029 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4030 bool changed = false;
4031 int err;
4032
4033 bt_dev_dbg(hdev, "sock %p", sk);
4034
4035 configurable_phys = get_configurable_phys(hdev);
4036 supported_phys = get_supported_phys(hdev);
4037 selected_phys = __le32_to_cpu(cp->selected_phys);
4038
4039 if (selected_phys & ~supported_phys)
4040 return mgmt_cmd_status(sk, hdev->id,
4041 MGMT_OP_SET_PHY_CONFIGURATION,
4042 MGMT_STATUS_INVALID_PARAMS);
4043
4044 unconfigure_phys = supported_phys & ~configurable_phys;
4045
4046 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4047 return mgmt_cmd_status(sk, hdev->id,
4048 MGMT_OP_SET_PHY_CONFIGURATION,
4049 MGMT_STATUS_INVALID_PARAMS);
4050
4051 if (selected_phys == get_selected_phys(hdev))
4052 return mgmt_cmd_complete(sk, hdev->id,
4053 MGMT_OP_SET_PHY_CONFIGURATION,
4054 0, NULL, 0);
4055
4056 hci_dev_lock(hdev);
4057
4058 if (!hdev_is_powered(hdev)) {
4059 err = mgmt_cmd_status(sk, hdev->id,
4060 MGMT_OP_SET_PHY_CONFIGURATION,
4061 MGMT_STATUS_REJECTED);
4062 goto unlock;
4063 }
4064
4065 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4066 err = mgmt_cmd_status(sk, hdev->id,
4067 MGMT_OP_SET_PHY_CONFIGURATION,
4068 MGMT_STATUS_BUSY);
4069 goto unlock;
4070 }
4071
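/* Build the BR/EDR packet type mask. Note the inverted logic below:
 * the DM/DH bits select basic rate packets that may be used, while
 * the 2DHx/3DHx bits mark EDR packets that shall NOT be used, so
 * selecting an EDR PHY clears its bit and deselecting it sets it.
 */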
4072 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4073 pkt_type |= (HCI_DH3 | HCI_DM3);
4074 else
4075 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4076
4077 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4078 pkt_type |= (HCI_DH5 | HCI_DM5);
4079 else
4080 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4081
4082 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4083 pkt_type &= ~HCI_2DH1;
4084 else
4085 pkt_type |= HCI_2DH1;
4086
4087 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4088 pkt_type &= ~HCI_2DH3;
4089 else
4090 pkt_type |= HCI_2DH3;
4091
4092 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4093 pkt_type &= ~HCI_2DH5;
4094 else
4095 pkt_type |= HCI_2DH5;
4096
4097 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4098 pkt_type &= ~HCI_3DH1;
4099 else
4100 pkt_type |= HCI_3DH1;
4101
4102 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4103 pkt_type &= ~HCI_3DH3;
4104 else
4105 pkt_type |= HCI_3DH3;
4106
4107 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4108 pkt_type &= ~HCI_3DH5;
4109 else
4110 pkt_type |= HCI_3DH5;
4111
4112 if (pkt_type != hdev->pkt_type) {
4113 hdev->pkt_type = pkt_type;
4114 changed = true;
4115 }
4116
4117 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4118 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4119 if (changed)
4120 mgmt_phy_configuration_changed(hdev, sk);
4121
4122 err = mgmt_cmd_complete(sk, hdev->id,
4123 MGMT_OP_SET_PHY_CONFIGURATION,
4124 0, NULL, 0);
4125
4126 goto unlock;
4127 }
4128
4129 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4130 len);
4131 if (!cmd)
4132 err = -ENOMEM;
4133 else
4134 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4135 set_default_phy_complete);
4136
4137 if (err < 0) {
4138 err = mgmt_cmd_status(sk, hdev->id,
4139 MGMT_OP_SET_PHY_CONFIGURATION,
4140 MGMT_STATUS_FAILED);
4141
4142 if (cmd)
4143 mgmt_pending_remove(cmd);
4144 }
4145
4146 unlock:
4147 hci_dev_unlock(hdev);
4148
4149 return err;
4150 }
4151
4152 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4153 u16 len)
4154 {
4155 int err = MGMT_STATUS_SUCCESS;
4156 struct mgmt_cp_set_blocked_keys *keys = data;
4157 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4158 sizeof(struct mgmt_blocked_key_info));
4159 u16 key_count, expected_len;
4160 int i;
4161
4162 bt_dev_dbg(hdev, "sock %p", sk);
4163
4164 key_count = __le16_to_cpu(keys->key_count);
4165 if (key_count > max_key_count) {
4166 bt_dev_err(hdev, "too big key_count value %u", key_count);
4167 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4168 MGMT_STATUS_INVALID_PARAMS);
4169 }
4170
4171 expected_len = struct_size(keys, keys, key_count);
4172 if (expected_len != len) {
4173 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4174 expected_len, len);
4175 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4176 MGMT_STATUS_INVALID_PARAMS);
4177 }
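/* struct_size() covers the fixed header plus the trailing key array;
 * e.g. a request with key_count == 2 must be exactly
 * sizeof(*keys) + 2 * sizeof(struct mgmt_blocked_key_info) bytes.
 */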
4178
4179 hci_dev_lock(hdev);
4180
4181 hci_blocked_keys_clear(hdev);
4182
4183 for (i = 0; i < key_count; ++i) {
4184 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4185
4186 if (!b) {
4187 err = MGMT_STATUS_NO_RESOURCES;
4188 break;
4189 }
4190
4191 b->type = keys->keys[i].type;
4192 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4193 list_add_rcu(&b->list, &hdev->blocked_keys);
4194 }
4195 hci_dev_unlock(hdev);
4196
4197 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4198 err, NULL, 0);
4199 }
4200
4201 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4202 void *data, u16 len)
4203 {
4204 struct mgmt_mode *cp = data;
4205 int err;
4206 bool changed = false;
4207
4208 bt_dev_dbg(hdev, "sock %p", sk);
4209
4210 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4211 return mgmt_cmd_status(sk, hdev->id,
4212 MGMT_OP_SET_WIDEBAND_SPEECH,
4213 MGMT_STATUS_NOT_SUPPORTED);
4214
4215 if (cp->val != 0x00 && cp->val != 0x01)
4216 return mgmt_cmd_status(sk, hdev->id,
4217 MGMT_OP_SET_WIDEBAND_SPEECH,
4218 MGMT_STATUS_INVALID_PARAMS);
4219
4220 hci_dev_lock(hdev);
4221
4222 if (hdev_is_powered(hdev) &&
4223 !!cp->val != hci_dev_test_flag(hdev,
4224 HCI_WIDEBAND_SPEECH_ENABLED)) {
4225 err = mgmt_cmd_status(sk, hdev->id,
4226 MGMT_OP_SET_WIDEBAND_SPEECH,
4227 MGMT_STATUS_REJECTED);
4228 goto unlock;
4229 }
4230
4231 if (cp->val)
4232 changed = !hci_dev_test_and_set_flag(hdev,
4233 HCI_WIDEBAND_SPEECH_ENABLED);
4234 else
4235 changed = hci_dev_test_and_clear_flag(hdev,
4236 HCI_WIDEBAND_SPEECH_ENABLED);
4237
4238 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4239 if (err < 0)
4240 goto unlock;
4241
4242 if (changed)
4243 err = new_settings(hdev, sk);
4244
4245 unlock:
4246 hci_dev_unlock(hdev);
4247 return err;
4248 }
4249
4250 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4251 void *data, u16 data_len)
4252 {
4253 char buf[20];
4254 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4255 u16 cap_len = 0;
4256 u8 flags = 0;
4257 u8 tx_power_range[2];
4258
4259 bt_dev_dbg(hdev, "sock %p", sk);
4260
4261 memset(&buf, 0, sizeof(buf));
4262
4263 hci_dev_lock(hdev);
4264
4265 /* When the Read Simple Pairing Options command is supported,
4266 * remote public key validation is supported.
4267 *
4268 * Alternatively, when Microsoft extensions are available, they can
4269 * indicate support for public key validation as well.
4270 */
4271 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4272 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4273
4274 flags |= 0x02; /* Remote public key validation (LE) */
4275
4276 /* When the Read Encryption Key Size command is supported, then the
4277 * encryption key size is enforced.
4278 */
4279 if (hdev->commands[20] & 0x10)
4280 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4281
4282 flags |= 0x08; /* Encryption key size enforcement (LE) */
4283
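/* Capability entries are encoded EIR-style as { length, type, value }
 * triplets; this one-octet flags field therefore occupies 3 bytes: a
 * length of 0x02 (covering type + value), MGMT_CAP_SEC_FLAGS and the
 * flags octet itself.
 */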
4284 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4285 &flags, 1);
4286
4287 /* When the Read Simple Pairing Options command is supported,
4288 * the maximum encryption key size information is also provided.
4289 */
4290 if (hdev->commands[41] & 0x08)
4291 cap_len = eir_append_le16(rp->cap, cap_len,
4292 MGMT_CAP_MAX_ENC_KEY_SIZE,
4293 hdev->max_enc_key_size);
4294
4295 cap_len = eir_append_le16(rp->cap, cap_len,
4296 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4297 SMP_MAX_ENC_KEY_SIZE);
4298
4299 /* Append the min/max LE tx power parameters if we were able to fetch
4300 * them from the controller.
4301 */
4302 if (hdev->commands[38] & 0x80) {
4303 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4304 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4305 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4306 tx_power_range, 2);
4307 }
4308
4309 rp->cap_len = cpu_to_le16(cap_len);
4310
4311 hci_dev_unlock(hdev);
4312
4313 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4314 rp, sizeof(*rp) + cap_len);
4315 }
4316
4317 #ifdef CONFIG_BT_FEATURE_DEBUG
4318 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4319 static const u8 debug_uuid[16] = {
4320 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4321 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4322 };
4323 #endif
4324
4325 /* 330859bc-7506-492d-9370-9a6f0614037f */
4326 static const u8 quality_report_uuid[16] = {
4327 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4328 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4329 };
4330
4331 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4332 static const u8 offload_codecs_uuid[16] = {
4333 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4334 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4335 };
4336
4337 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4338 static const u8 le_simultaneous_roles_uuid[16] = {
4339 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4340 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4341 };
4342
4343 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4344 static const u8 rpa_resolution_uuid[16] = {
4345 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4346 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4347 };
4348
4349 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4350 static const u8 iso_socket_uuid[16] = {
4351 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4352 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4353 };
4354
4355 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4356 static const u8 mgmt_mesh_uuid[16] = {
4357 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4358 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4359 };
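/* Note that the UUID byte arrays above are stored in little-endian
 * order, i.e. reversed relative to their string form: for
 * 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 the first byte is 0x76 and
 * the last is 0x2c.
 */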
4360
4361 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4362 void *data, u16 data_len)
4363 {
4364 struct mgmt_rp_read_exp_features_info *rp;
4365 size_t len;
4366 u16 idx = 0;
4367 u32 flags;
4368 int status;
4369
4370 bt_dev_dbg(hdev, "sock %p", sk);
4371
4372 /* Enough space for 7 features */
4373 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4374 rp = kzalloc(len, GFP_KERNEL);
4375 if (!rp)
4376 return -ENOMEM;
4377
4378 #ifdef CONFIG_BT_FEATURE_DEBUG
4379 if (!hdev) {
4380 flags = bt_dbg_get() ? BIT(0) : 0;
4381
4382 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4383 rp->features[idx].flags = cpu_to_le32(flags);
4384 idx++;
4385 }
4386 #endif
4387
4388 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4389 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4390 flags = BIT(0);
4391 else
4392 flags = 0;
4393
4394 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4395 rp->features[idx].flags = cpu_to_le32(flags);
4396 idx++;
4397 }
4398
4399 if (hdev && ll_privacy_capable(hdev)) {
4400 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4401 flags = BIT(0) | BIT(1);
4402 else
4403 flags = BIT(1);
4404
4405 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4406 rp->features[idx].flags = cpu_to_le32(flags);
4407 idx++;
4408 }
4409
4410 if (hdev && (aosp_has_quality_report(hdev) ||
4411 hdev->set_quality_report)) {
4412 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4413 flags = BIT(0);
4414 else
4415 flags = 0;
4416
4417 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4418 rp->features[idx].flags = cpu_to_le32(flags);
4419 idx++;
4420 }
4421
4422 if (hdev && hdev->get_data_path_id) {
4423 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4424 flags = BIT(0);
4425 else
4426 flags = 0;
4427
4428 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4429 rp->features[idx].flags = cpu_to_le32(flags);
4430 idx++;
4431 }
4432
4433 if (IS_ENABLED(CONFIG_BT_LE)) {
4434 flags = iso_enabled() ? BIT(0) : 0;
4435 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4436 rp->features[idx].flags = cpu_to_le32(flags);
4437 idx++;
4438 }
4439
4440 if (hdev && lmp_le_capable(hdev)) {
4441 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4442 flags = BIT(0);
4443 else
4444 flags = 0;
4445
4446 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4447 rp->features[idx].flags = cpu_to_le32(flags);
4448 idx++;
4449 }
4450
4451 rp->feature_count = cpu_to_le16(idx);
4452
4453 /* After reading the experimental features information, enable
4454 * the events to update the client on any future change.
4455 */
4456 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4457
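/* Each reported feature is 20 bytes on the wire: a 16-byte UUID
 * followed by a 32-bit flags word, hence the sizeof(*rp) + (20 * idx)
 * response length below.
 */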
4458 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4459 MGMT_OP_READ_EXP_FEATURES_INFO,
4460 0, rp, sizeof(*rp) + (20 * idx));
4461
4462 kfree(rp);
4463 return status;
4464 }
4465
4466 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4467 struct sock *skip)
4468 {
4469 struct mgmt_ev_exp_feature_changed ev;
4470
4471 memset(&ev, 0, sizeof(ev));
4472 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4473 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4474
4475 /* Do we need to be atomic with the conn_flags? */
4476 if (enabled && privacy_mode_capable(hdev))
4477 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4478 else
4479 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4480
4481 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4482 &ev, sizeof(ev),
4483 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4484
4485 }
4486
4487 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4488 bool enabled, struct sock *skip)
4489 {
4490 struct mgmt_ev_exp_feature_changed ev;
4491
4492 memset(&ev, 0, sizeof(ev));
4493 memcpy(ev.uuid, uuid, 16);
4494 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4495
4496 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4497 &ev, sizeof(ev),
4498 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4499 }
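/* In the Experimental Feature Changed event, BIT(0) of the flags
 * reports whether the feature is now enabled; the LL privacy variant
 * above additionally sets BIT(1) to indicate that the supported
 * settings changed along with it.
 */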
4500
4501 #define EXP_FEAT(_uuid, _set_func) \
4502 { \
4503 .uuid = _uuid, \
4504 .set_func = _set_func, \
4505 }
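/* For example, EXP_FEAT(debug_uuid, set_debug_func) expands to
 * { .uuid = debug_uuid, .set_func = set_debug_func }; the
 * exp_features[] table below is built from such entries and is
 * terminated by EXP_FEAT(NULL, NULL).
 */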
4506
4507 /* The zero key uuid is special. Multiple exp features are set through it. */
4508 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4509 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4510 {
4511 struct mgmt_rp_set_exp_feature rp;
4512
4513 memset(rp.uuid, 0, 16);
4514 rp.flags = cpu_to_le32(0);
4515
4516 #ifdef CONFIG_BT_FEATURE_DEBUG
4517 if (!hdev) {
4518 bool changed = bt_dbg_get();
4519
4520 bt_dbg_set(false);
4521
4522 if (changed)
4523 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4524 }
4525 #endif
4526
4527 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4528 bool changed;
4529
4530 changed = hci_dev_test_and_clear_flag(hdev,
4531 HCI_ENABLE_LL_PRIVACY);
4532 if (changed)
4533 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4534 sk);
4535 }
4536
4537 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4538
4539 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4540 MGMT_OP_SET_EXP_FEATURE, 0,
4541 &rp, sizeof(rp));
4542 }
4543
4544 #ifdef CONFIG_BT_FEATURE_DEBUG
4545 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4546 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4547 {
4548 struct mgmt_rp_set_exp_feature rp;
4549
4550 bool val, changed;
4551 int err;
4552
4553 /* Command requires the non-controller index */
4554 if (hdev)
4555 return mgmt_cmd_status(sk, hdev->id,
4556 MGMT_OP_SET_EXP_FEATURE,
4557 MGMT_STATUS_INVALID_INDEX);
4558
4559 /* Parameters are limited to a single octet */
4560 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4561 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4562 MGMT_OP_SET_EXP_FEATURE,
4563 MGMT_STATUS_INVALID_PARAMS);
4564
4565 /* Only boolean on/off is supported */
4566 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4567 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4568 MGMT_OP_SET_EXP_FEATURE,
4569 MGMT_STATUS_INVALID_PARAMS);
4570
4571 val = !!cp->param[0];
4572 changed = val ? !bt_dbg_get() : bt_dbg_get();
4573 bt_dbg_set(val);
4574
4575 memcpy(rp.uuid, debug_uuid, 16);
4576 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4577
4578 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4579
4580 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4581 MGMT_OP_SET_EXP_FEATURE, 0,
4582 &rp, sizeof(rp));
4583
4584 if (changed)
4585 exp_feature_changed(hdev, debug_uuid, val, sk);
4586
4587 return err;
4588 }
4589 #endif
4590
4591 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4592 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4593 {
4594 struct mgmt_rp_set_exp_feature rp;
4595 bool val, changed;
4596 int err;
4597
4598 /* Command requires the controller index */
4599 if (!hdev)
4600 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4601 MGMT_OP_SET_EXP_FEATURE,
4602 MGMT_STATUS_INVALID_INDEX);
4603
4604 /* Parameters are limited to a single octet */
4605 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4606 return mgmt_cmd_status(sk, hdev->id,
4607 MGMT_OP_SET_EXP_FEATURE,
4608 MGMT_STATUS_INVALID_PARAMS);
4609
4610 /* Only boolean on/off is supported */
4611 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4612 return mgmt_cmd_status(sk, hdev->id,
4613 MGMT_OP_SET_EXP_FEATURE,
4614 MGMT_STATUS_INVALID_PARAMS);
4615
4616 val = !!cp->param[0];
4617
4618 if (val) {
4619 changed = !hci_dev_test_and_set_flag(hdev,
4620 HCI_MESH_EXPERIMENTAL);
4621 } else {
4622 hci_dev_clear_flag(hdev, HCI_MESH);
4623 changed = hci_dev_test_and_clear_flag(hdev,
4624 HCI_MESH_EXPERIMENTAL);
4625 }
4626
4627 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4628 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4629
4630 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4631
4632 err = mgmt_cmd_complete(sk, hdev->id,
4633 MGMT_OP_SET_EXP_FEATURE, 0,
4634 &rp, sizeof(rp));
4635
4636 if (changed)
4637 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4638
4639 return err;
4640 }
4641
4642 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4643 struct mgmt_cp_set_exp_feature *cp,
4644 u16 data_len)
4645 {
4646 struct mgmt_rp_set_exp_feature rp;
4647 bool val, changed;
4648 int err;
4649 u32 flags;
4650
4651 /* Command requires the controller index */
4652 if (!hdev)
4653 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4654 MGMT_OP_SET_EXP_FEATURE,
4655 MGMT_STATUS_INVALID_INDEX);
4656
4657 /* Changes can only be made when controller is powered down */
4658 if (hdev_is_powered(hdev))
4659 return mgmt_cmd_status(sk, hdev->id,
4660 MGMT_OP_SET_EXP_FEATURE,
4661 MGMT_STATUS_REJECTED);
4662
4663 /* Parameters are limited to a single octet */
4664 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4665 return mgmt_cmd_status(sk, hdev->id,
4666 MGMT_OP_SET_EXP_FEATURE,
4667 MGMT_STATUS_INVALID_PARAMS);
4668
4669 /* Only boolean on/off is supported */
4670 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4671 return mgmt_cmd_status(sk, hdev->id,
4672 MGMT_OP_SET_EXP_FEATURE,
4673 MGMT_STATUS_INVALID_PARAMS);
4674
4675 val = !!cp->param[0];
4676
4677 if (val) {
4678 changed = !hci_dev_test_and_set_flag(hdev,
4679 HCI_ENABLE_LL_PRIVACY);
4680 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4681
4682 /* Enable LL privacy + supported settings changed */
4683 flags = BIT(0) | BIT(1);
4684 } else {
4685 changed = hci_dev_test_and_clear_flag(hdev,
4686 HCI_ENABLE_LL_PRIVACY);
4687
4688 /* Disable LL privacy + supported settings changed */
4689 flags = BIT(1);
4690 }
4691
4692 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4693 rp.flags = cpu_to_le32(flags);
4694
4695 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4696
4697 err = mgmt_cmd_complete(sk, hdev->id,
4698 MGMT_OP_SET_EXP_FEATURE, 0,
4699 &rp, sizeof(rp));
4700
4701 if (changed)
4702 exp_ll_privacy_feature_changed(val, hdev, sk);
4703
4704 return err;
4705 }
4706
4707 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4708 struct mgmt_cp_set_exp_feature *cp,
4709 u16 data_len)
4710 {
4711 struct mgmt_rp_set_exp_feature rp;
4712 bool val, changed;
4713 int err;
4714
4715 /* Command requires a valid controller index */
4716 if (!hdev)
4717 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4718 MGMT_OP_SET_EXP_FEATURE,
4719 MGMT_STATUS_INVALID_INDEX);
4720
4721 /* Parameters are limited to a single octet */
4722 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4723 return mgmt_cmd_status(sk, hdev->id,
4724 MGMT_OP_SET_EXP_FEATURE,
4725 MGMT_STATUS_INVALID_PARAMS);
4726
4727 /* Only boolean on/off is supported */
4728 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4729 return mgmt_cmd_status(sk, hdev->id,
4730 MGMT_OP_SET_EXP_FEATURE,
4731 MGMT_STATUS_INVALID_PARAMS);
4732
4733 hci_req_sync_lock(hdev);
4734
4735 val = !!cp->param[0];
4736 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4737
4738 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4739 err = mgmt_cmd_status(sk, hdev->id,
4740 MGMT_OP_SET_EXP_FEATURE,
4741 MGMT_STATUS_NOT_SUPPORTED);
4742 goto unlock_quality_report;
4743 }
4744
4745 if (changed) {
4746 if (hdev->set_quality_report)
4747 err = hdev->set_quality_report(hdev, val);
4748 else
4749 err = aosp_set_quality_report(hdev, val);
4750
4751 if (err) {
4752 err = mgmt_cmd_status(sk, hdev->id,
4753 MGMT_OP_SET_EXP_FEATURE,
4754 MGMT_STATUS_FAILED);
4755 goto unlock_quality_report;
4756 }
4757
4758 if (val)
4759 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4760 else
4761 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4762 }
4763
4764 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4765
4766 memcpy(rp.uuid, quality_report_uuid, 16);
4767 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4768 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4769
4770 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4771 &rp, sizeof(rp));
4772
4773 if (changed)
4774 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4775
4776 unlock_quality_report:
4777 hci_req_sync_unlock(hdev);
4778 return err;
4779 }
4780
4781 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4782 struct mgmt_cp_set_exp_feature *cp,
4783 u16 data_len)
4784 {
4785 bool val, changed;
4786 int err;
4787 struct mgmt_rp_set_exp_feature rp;
4788
4789 /* Command requires a valid controller index */
4790 if (!hdev)
4791 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4792 MGMT_OP_SET_EXP_FEATURE,
4793 MGMT_STATUS_INVALID_INDEX);
4794
4795 /* Parameters are limited to a single octet */
4796 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4797 return mgmt_cmd_status(sk, hdev->id,
4798 MGMT_OP_SET_EXP_FEATURE,
4799 MGMT_STATUS_INVALID_PARAMS);
4800
4801 /* Only boolean on/off is supported */
4802 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4803 return mgmt_cmd_status(sk, hdev->id,
4804 MGMT_OP_SET_EXP_FEATURE,
4805 MGMT_STATUS_INVALID_PARAMS);
4806
4807 val = !!cp->param[0];
4808 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4809
4810 if (!hdev->get_data_path_id) {
4811 return mgmt_cmd_status(sk, hdev->id,
4812 MGMT_OP_SET_EXP_FEATURE,
4813 MGMT_STATUS_NOT_SUPPORTED);
4814 }
4815
4816 if (changed) {
4817 if (val)
4818 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4819 else
4820 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4821 }
4822
4823 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4824 val, changed);
4825
4826 memcpy(rp.uuid, offload_codecs_uuid, 16);
4827 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4828 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4829 err = mgmt_cmd_complete(sk, hdev->id,
4830 MGMT_OP_SET_EXP_FEATURE, 0,
4831 &rp, sizeof(rp));
4832
4833 if (changed)
4834 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4835
4836 return err;
4837 }
4838
4839 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4840 struct mgmt_cp_set_exp_feature *cp,
4841 u16 data_len)
4842 {
4843 bool val, changed;
4844 int err;
4845 struct mgmt_rp_set_exp_feature rp;
4846
4847 /* Command requires a valid controller index */
4848 if (!hdev)
4849 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4850 MGMT_OP_SET_EXP_FEATURE,
4851 MGMT_STATUS_INVALID_INDEX);
4852
4853 /* Parameters are limited to a single octet */
4854 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4855 return mgmt_cmd_status(sk, hdev->id,
4856 MGMT_OP_SET_EXP_FEATURE,
4857 MGMT_STATUS_INVALID_PARAMS);
4858
4859 /* Only boolean on/off is supported */
4860 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4861 return mgmt_cmd_status(sk, hdev->id,
4862 MGMT_OP_SET_EXP_FEATURE,
4863 MGMT_STATUS_INVALID_PARAMS);
4864
4865 val = !!cp->param[0];
4866 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4867
4868 if (!hci_dev_le_state_simultaneous(hdev)) {
4869 return mgmt_cmd_status(sk, hdev->id,
4870 MGMT_OP_SET_EXP_FEATURE,
4871 MGMT_STATUS_NOT_SUPPORTED);
4872 }
4873
4874 if (changed) {
4875 if (val)
4876 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4877 else
4878 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4879 }
4880
4881 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4882 val, changed);
4883
4884 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4885 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4886 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4887 err = mgmt_cmd_complete(sk, hdev->id,
4888 MGMT_OP_SET_EXP_FEATURE, 0,
4889 &rp, sizeof(rp));
4890
4891 if (changed)
4892 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4893
4894 return err;
4895 }
4896
4897 #ifdef CONFIG_BT_LE
4898 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4899 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4900 {
4901 struct mgmt_rp_set_exp_feature rp;
4902 bool val, changed = false;
4903 int err;
4904
4905 /* Command requires the non-controller index */
4906 if (hdev)
4907 return mgmt_cmd_status(sk, hdev->id,
4908 MGMT_OP_SET_EXP_FEATURE,
4909 MGMT_STATUS_INVALID_INDEX);
4910
4911 /* Parameters are limited to a single octet */
4912 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4913 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4914 MGMT_OP_SET_EXP_FEATURE,
4915 MGMT_STATUS_INVALID_PARAMS);
4916
4917 /* Only boolean on/off is supported */
4918 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4919 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4920 MGMT_OP_SET_EXP_FEATURE,
4921 MGMT_STATUS_INVALID_PARAMS);
4922
4923 val = !!cp->param[0];
4924 if (val)
4925 err = iso_init();
4926 else
4927 err = iso_exit();
4928
4929 if (!err)
4930 changed = true;
4931
4932 memcpy(rp.uuid, iso_socket_uuid, 16);
4933 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4934
4935 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4936
4937 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4938 MGMT_OP_SET_EXP_FEATURE, 0,
4939 &rp, sizeof(rp));
4940
4941 if (changed)
4942 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4943
4944 return err;
4945 }
4946 #endif
4947
4948 static const struct mgmt_exp_feature {
4949 const u8 *uuid;
4950 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4951 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4952 } exp_features[] = {
4953 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4954 #ifdef CONFIG_BT_FEATURE_DEBUG
4955 EXP_FEAT(debug_uuid, set_debug_func),
4956 #endif
4957 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4958 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4959 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4960 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4961 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4962 #ifdef CONFIG_BT_LE
4963 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4964 #endif
4965
4966 /* end with a null feature */
4967 EXP_FEAT(NULL, NULL)
4968 };
4969
4970 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4971 void *data, u16 data_len)
4972 {
4973 struct mgmt_cp_set_exp_feature *cp = data;
4974 size_t i = 0;
4975
4976 bt_dev_dbg(hdev, "sock %p", sk);
4977
4978 for (i = 0; exp_features[i].uuid; i++) {
4979 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4980 return exp_features[i].set_func(sk, hdev, cp, data_len);
4981 }
4982
4983 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4984 MGMT_OP_SET_EXP_FEATURE,
4985 MGMT_STATUS_NOT_SUPPORTED);
4986 }
4987
4988 static u32 get_params_flags(struct hci_dev *hdev,
4989 struct hci_conn_params *params)
4990 {
4991 u32 flags = hdev->conn_flags;
4992
4993 /* Devices using RPAs can only be programmed in the acceptlist if
4994 * LL Privacy has been enabled; otherwise they cannot set
4995 * HCI_CONN_FLAG_REMOTE_WAKEUP.
4996 */
4997 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
4998 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
4999 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5000
5001 return flags;
5002 }
5003
5004 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5005 u16 data_len)
5006 {
5007 struct mgmt_cp_get_device_flags *cp = data;
5008 struct mgmt_rp_get_device_flags rp;
5009 struct bdaddr_list_with_flags *br_params;
5010 struct hci_conn_params *params;
5011 u32 supported_flags;
5012 u32 current_flags = 0;
5013 u8 status = MGMT_STATUS_INVALID_PARAMS;
5014
5015 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
5016 &cp->addr.bdaddr, cp->addr.type);
5017
5018 hci_dev_lock(hdev);
5019
5020 supported_flags = hdev->conn_flags;
5021
5022 memset(&rp, 0, sizeof(rp));
5023
5024 if (cp->addr.type == BDADDR_BREDR) {
5025 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5026 &cp->addr.bdaddr,
5027 cp->addr.type);
5028 if (!br_params)
5029 goto done;
5030
5031 current_flags = br_params->flags;
5032 } else {
5033 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5034 le_addr_type(cp->addr.type));
5035 if (!params)
5036 goto done;
5037
5038 supported_flags = get_params_flags(hdev, params);
5039 current_flags = params->flags;
5040 }
5041
5042 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5043 rp.addr.type = cp->addr.type;
5044 rp.supported_flags = cpu_to_le32(supported_flags);
5045 rp.current_flags = cpu_to_le32(current_flags);
5046
5047 status = MGMT_STATUS_SUCCESS;
5048
5049 done:
5050 hci_dev_unlock(hdev);
5051
5052 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5053 &rp, sizeof(rp));
5054 }
5055
5056 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5057 bdaddr_t *bdaddr, u8 bdaddr_type,
5058 u32 supported_flags, u32 current_flags)
5059 {
5060 struct mgmt_ev_device_flags_changed ev;
5061
5062 bacpy(&ev.addr.bdaddr, bdaddr);
5063 ev.addr.type = bdaddr_type;
5064 ev.supported_flags = cpu_to_le32(supported_flags);
5065 ev.current_flags = cpu_to_le32(current_flags);
5066
5067 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5068 }
5069
5070 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5071 u16 len)
5072 {
5073 struct mgmt_cp_set_device_flags *cp = data;
5074 struct bdaddr_list_with_flags *br_params;
5075 struct hci_conn_params *params;
5076 u8 status = MGMT_STATUS_INVALID_PARAMS;
5077 u32 supported_flags;
5078 u32 current_flags = __le32_to_cpu(cp->current_flags);
5079
5080 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5081 &cp->addr.bdaddr, cp->addr.type, current_flags);
5082
5083 /* We should probably take hci_dev_lock() earlier, since conn_flags can change */
5084 supported_flags = hdev->conn_flags;
5085
5086 if ((supported_flags | current_flags) != supported_flags) {
5087 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
5088 current_flags, supported_flags);
5089 goto done;
5090 }
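/* The check above rejects any requested bit outside the supported
 * mask; e.g. supported_flags 0x3 with current_flags 0x4 fails since
 * 0x3 | 0x4 == 0x7 != 0x3.
 */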
5091
5092 hci_dev_lock(hdev);
5093
5094 if (cp->addr.type == BDADDR_BREDR) {
5095 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5096 &cp->addr.bdaddr,
5097 cp->addr.type);
5098
5099 if (br_params) {
5100 br_params->flags = current_flags;
5101 status = MGMT_STATUS_SUCCESS;
5102 } else {
5103 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5104 &cp->addr.bdaddr, cp->addr.type);
5105 }
5106
5107 goto unlock;
5108 }
5109
5110 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5111 le_addr_type(cp->addr.type));
5112 if (!params) {
5113 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5114 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5115 goto unlock;
5116 }
5117
5118 supported_flags = get_params_flags(hdev, params);
5119
5120 if ((supported_flags | current_flags) != supported_flags) {
5121 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
5122 current_flags, supported_flags);
5123 goto unlock;
5124 }
5125
5126 WRITE_ONCE(params->flags, current_flags);
5127 status = MGMT_STATUS_SUCCESS;
5128
5129 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5130 * has been set.
5131 */
5132 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5133 hci_update_passive_scan(hdev);
5134
5135 unlock:
5136 hci_dev_unlock(hdev);
5137
5138 done:
5139 if (status == MGMT_STATUS_SUCCESS)
5140 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5141 supported_flags, current_flags);
5142
5143 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5144 &cp->addr, sizeof(cp->addr));
5145 }
5146
5147 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5148 u16 handle)
5149 {
5150 struct mgmt_ev_adv_monitor_added ev;
5151
5152 ev.monitor_handle = cpu_to_le16(handle);
5153
5154 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5155 }
5156
5157 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5158 {
5159 struct mgmt_ev_adv_monitor_removed ev;
5160 struct mgmt_pending_cmd *cmd;
5161 struct sock *sk_skip = NULL;
5162 struct mgmt_cp_remove_adv_monitor *cp;
5163
5164 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5165 if (cmd) {
5166 cp = cmd->param;
5167
5168 if (cp->monitor_handle)
5169 sk_skip = cmd->sk;
5170 }
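/* The socket that issued the remove receives the command reply
 * instead, so it is skipped when broadcasting the event.
 */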
5171
5172 ev.monitor_handle = cpu_to_le16(handle);
5173
5174 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5175 }
5176
5177 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5178 void *data, u16 len)
5179 {
5180 struct adv_monitor *monitor = NULL;
5181 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5182 int handle, err;
5183 size_t rp_size = 0;
5184 __u32 supported = 0;
5185 __u32 enabled = 0;
5186 __u16 num_handles = 0;
5187 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5188
5189 BT_DBG("request for %s", hdev->name);
5190
5191 hci_dev_lock(hdev);
5192
5193 if (msft_monitor_supported(hdev))
5194 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5195
5196 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5197 handles[num_handles++] = monitor->handle;
5198
5199 hci_dev_unlock(hdev);
5200
5201 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5202 rp = kmalloc(rp_size, GFP_KERNEL);
5203 if (!rp)
5204 return -ENOMEM;
5205
5206 /* All supported features are currently enabled */
5207 enabled = supported;
5208
5209 rp->supported_features = cpu_to_le32(supported);
5210 rp->enabled_features = cpu_to_le32(enabled);
5211 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5212 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5213 rp->num_handles = cpu_to_le16(num_handles);
5214 if (num_handles)
5215 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5216
5217 err = mgmt_cmd_complete(sk, hdev->id,
5218 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5219 MGMT_STATUS_SUCCESS, rp, rp_size);
5220
5221 kfree(rp);
5222
5223 return err;
5224 }
5225
5226 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5227 void *data, int status)
5228 {
5229 struct mgmt_rp_add_adv_patterns_monitor rp;
5230 struct mgmt_pending_cmd *cmd = data;
5231 struct adv_monitor *monitor = cmd->user_data;
5232
5233 hci_dev_lock(hdev);
5234
5235 rp.monitor_handle = cpu_to_le16(monitor->handle);
5236
5237 if (!status) {
5238 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5239 hdev->adv_monitors_cnt++;
5240 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5241 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5242 hci_update_passive_scan(hdev);
5243 }
5244
5245 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5246 mgmt_status(status), &rp, sizeof(rp));
5247 mgmt_pending_remove(cmd);
5248
5249 hci_dev_unlock(hdev);
5250 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5251 rp.monitor_handle, status);
5252 }
5253
5254 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5255 {
5256 struct mgmt_pending_cmd *cmd = data;
5257 struct adv_monitor *monitor = cmd->user_data;
5258
5259 return hci_add_adv_monitor(hdev, monitor);
5260 }
5261
5262 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5263 struct adv_monitor *m, u8 status,
5264 void *data, u16 len, u16 op)
5265 {
5266 struct mgmt_pending_cmd *cmd;
5267 int err;
5268
5269 hci_dev_lock(hdev);
5270
5271 if (status)
5272 goto unlock;
5273
5274 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5275 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5276 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5277 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5278 status = MGMT_STATUS_BUSY;
5279 goto unlock;
5280 }
5281
5282 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5283 if (!cmd) {
5284 status = MGMT_STATUS_NO_RESOURCES;
5285 goto unlock;
5286 }
5287
5288 cmd->user_data = m;
5289 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5290 mgmt_add_adv_patterns_monitor_complete);
5291 if (err) {
5292 if (err == -ENOMEM)
5293 status = MGMT_STATUS_NO_RESOURCES;
5294 else
5295 status = MGMT_STATUS_FAILED;
5296
5297 goto unlock;
5298 }
5299
5300 hci_dev_unlock(hdev);
5301
5302 return 0;
5303
5304 unlock:
5305 hci_free_adv_monitor(hdev, m);
5306 hci_dev_unlock(hdev);
5307 return mgmt_cmd_status(sk, hdev->id, op, status);
5308 }
5309
5310 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5311 struct mgmt_adv_rssi_thresholds *rssi)
5312 {
5313 if (rssi) {
5314 m->rssi.low_threshold = rssi->low_threshold;
5315 m->rssi.low_threshold_timeout =
5316 __le16_to_cpu(rssi->low_threshold_timeout);
5317 m->rssi.high_threshold = rssi->high_threshold;
5318 m->rssi.high_threshold_timeout =
5319 __le16_to_cpu(rssi->high_threshold_timeout);
5320 m->rssi.sampling_period = rssi->sampling_period;
5321 } else {
5322 /* Default values. These numbers are the least constraining
5323 * parameters for the MSFT API to work, so it behaves as if there
5324 * were no RSSI parameters to consider. May need to be changed if
5325 * other APIs are to be supported.
5326 */
5327 m->rssi.low_threshold = -127;
5328 m->rssi.low_threshold_timeout = 60;
5329 m->rssi.high_threshold = -127;
5330 m->rssi.high_threshold_timeout = 0;
5331 m->rssi.sampling_period = 0;
5332 }
5333 }
5334
5335 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5336 struct mgmt_adv_pattern *patterns)
5337 {
5338 u8 offset = 0, length = 0;
5339 struct adv_pattern *p = NULL;
5340 int i;
5341
5342 for (i = 0; i < pattern_count; i++) {
5343 offset = patterns[i].offset;
5344 length = patterns[i].length;
5345 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5346 length > HCI_MAX_EXT_AD_LENGTH ||
5347 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5348 return MGMT_STATUS_INVALID_PARAMS;
5349
5350 p = kmalloc(sizeof(*p), GFP_KERNEL);
5351 if (!p)
5352 return MGMT_STATUS_NO_RESOURCES;
5353
5354 p->ad_type = patterns[i].ad_type;
5355 p->offset = patterns[i].offset;
5356 p->length = patterns[i].length;
5357 memcpy(p->value, patterns[i].value, p->length);
5358
5359 INIT_LIST_HEAD(&p->list);
5360 list_add(&p->list, &m->patterns);
5361 }
5362
5363 return MGMT_STATUS_SUCCESS;
5364 }
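/* Illustrative example (assuming MSFT-style content matching): a
 * pattern matching advertisements whose Complete Local Name (AD type
 * 0x09) starts with "BT" would use ad_type = 0x09, offset = 0,
 * length = 2 and value = { 'B', 'T' }; the bounds check above keeps
 * offset + length within HCI_MAX_EXT_AD_LENGTH.
 */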
5365
5366 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5367 void *data, u16 len)
5368 {
5369 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5370 struct adv_monitor *m = NULL;
5371 u8 status = MGMT_STATUS_SUCCESS;
5372 size_t expected_size = sizeof(*cp);
5373
5374 BT_DBG("request for %s", hdev->name);
5375
5376 if (len <= sizeof(*cp)) {
5377 status = MGMT_STATUS_INVALID_PARAMS;
5378 goto done;
5379 }
5380
5381 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5382 if (len != expected_size) {
5383 status = MGMT_STATUS_INVALID_PARAMS;
5384 goto done;
5385 }
5386
5387 m = kzalloc(sizeof(*m), GFP_KERNEL);
5388 if (!m) {
5389 status = MGMT_STATUS_NO_RESOURCES;
5390 goto done;
5391 }
5392
5393 INIT_LIST_HEAD(&m->patterns);
5394
5395 parse_adv_monitor_rssi(m, NULL);
5396 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5397
5398 done:
5399 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5400 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5401 }
5402
5403 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5404 void *data, u16 len)
5405 {
5406 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5407 struct adv_monitor *m = NULL;
5408 u8 status = MGMT_STATUS_SUCCESS;
5409 size_t expected_size = sizeof(*cp);
5410
5411 BT_DBG("request for %s", hdev->name);
5412
5413 if (len <= sizeof(*cp)) {
5414 status = MGMT_STATUS_INVALID_PARAMS;
5415 goto done;
5416 }
5417
5418 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5419 if (len != expected_size) {
5420 status = MGMT_STATUS_INVALID_PARAMS;
5421 goto done;
5422 }
5423
5424 m = kzalloc(sizeof(*m), GFP_KERNEL);
5425 if (!m) {
5426 status = MGMT_STATUS_NO_RESOURCES;
5427 goto done;
5428 }
5429
5430 INIT_LIST_HEAD(&m->patterns);
5431
5432 parse_adv_monitor_rssi(m, &cp->rssi);
5433 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5434
5435 done:
5436 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5437 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5438 }
5439
5440 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5441 void *data, int status)
5442 {
5443 struct mgmt_rp_remove_adv_monitor rp;
5444 struct mgmt_pending_cmd *cmd = data;
5445 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5446
5447 hci_dev_lock(hdev);
5448
5449 rp.monitor_handle = cp->monitor_handle;
5450
5451 if (!status)
5452 hci_update_passive_scan(hdev);
5453
5454 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5455 mgmt_status(status), &rp, sizeof(rp));
5456 mgmt_pending_remove(cmd);
5457
5458 hci_dev_unlock(hdev);
5459 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5460 rp.monitor_handle, status);
5461 }
5462
5463 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5464 {
5465 struct mgmt_pending_cmd *cmd = data;
5466 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5467 u16 handle = __le16_to_cpu(cp->monitor_handle);
5468
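/* A monitor_handle of 0 acts as a wildcard and removes every
 * registered monitor.
 */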
5469 if (!handle)
5470 return hci_remove_all_adv_monitor(hdev);
5471
5472 return hci_remove_single_adv_monitor(hdev, handle);
5473 }
5474
5475 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5476 void *data, u16 len)
5477 {
5478 struct mgmt_pending_cmd *cmd;
5479 int err, status;
5480
5481 hci_dev_lock(hdev);
5482
5483 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5484 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5485 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5486 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5487 status = MGMT_STATUS_BUSY;
5488 goto unlock;
5489 }
5490
5491 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5492 if (!cmd) {
5493 status = MGMT_STATUS_NO_RESOURCES;
5494 goto unlock;
5495 }
5496
5497 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5498 mgmt_remove_adv_monitor_complete);
5499
5500 if (err) {
5501 mgmt_pending_remove(cmd);
5502
5503 if (err == -ENOMEM)
5504 status = MGMT_STATUS_NO_RESOURCES;
5505 else
5506 status = MGMT_STATUS_FAILED;
5507
5508 goto unlock;
5509 }
5510
5511 hci_dev_unlock(hdev);
5512
5513 return 0;
5514
5515 unlock:
5516 hci_dev_unlock(hdev);
5517 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5518 status);
5519 }
5520
5521 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5522 {
5523 struct mgmt_rp_read_local_oob_data mgmt_rp;
5524 size_t rp_size = sizeof(mgmt_rp);
5525 struct mgmt_pending_cmd *cmd = data;
5526 struct sk_buff *skb = cmd->skb;
5527 u8 status = mgmt_status(err);
5528
5529 if (!status) {
5530 if (!skb)
5531 status = MGMT_STATUS_FAILED;
5532 else if (IS_ERR(skb))
5533 status = mgmt_status(PTR_ERR(skb));
5534 else
5535 status = mgmt_status(skb->data[0]);
5536 }
5537
5538 bt_dev_dbg(hdev, "status %d", status);
5539
5540 if (status) {
5541 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5542 goto remove;
5543 }
5544
5545 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5546
5547 if (!bredr_sc_enabled(hdev)) {
5548 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5549
5550 if (skb->len < sizeof(*rp)) {
5551 mgmt_cmd_status(cmd->sk, hdev->id,
5552 MGMT_OP_READ_LOCAL_OOB_DATA,
5553 MGMT_STATUS_FAILED);
5554 goto remove;
5555 }
5556
5557 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5558 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5559
5560 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5561 } else {
5562 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5563
5564 if (skb->len < sizeof(*rp)) {
5565 mgmt_cmd_status(cmd->sk, hdev->id,
5566 MGMT_OP_READ_LOCAL_OOB_DATA,
5567 MGMT_STATUS_FAILED);
5568 goto remove;
5569 }
5570
5571 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5572 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5573
5574 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5575 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5576 }
5577
5578 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5579 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5580
5581 remove:
5582 if (skb && !IS_ERR(skb))
5583 kfree_skb(skb);
5584
5585 mgmt_pending_free(cmd);
5586 }
5587
5588 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5589 {
5590 struct mgmt_pending_cmd *cmd = data;
5591
5592 if (bredr_sc_enabled(hdev))
5593 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5594 else
5595 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5596
5597 if (IS_ERR(cmd->skb))
5598 return PTR_ERR(cmd->skb);
5599 else
5600 return 0;
5601 }
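/* When BR/EDR Secure Connections is enabled, the extended variant is
 * requested so the controller returns both the P-192 and P-256
 * hash/rand pairs consumed by read_local_oob_data_complete() above.
 */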
5602
5603 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5604 void *data, u16 data_len)
5605 {
5606 struct mgmt_pending_cmd *cmd;
5607 int err;
5608
5609 bt_dev_dbg(hdev, "sock %p", sk);
5610
5611 hci_dev_lock(hdev);
5612
5613 if (!hdev_is_powered(hdev)) {
5614 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5615 MGMT_STATUS_NOT_POWERED);
5616 goto unlock;
5617 }
5618
5619 if (!lmp_ssp_capable(hdev)) {
5620 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5621 MGMT_STATUS_NOT_SUPPORTED);
5622 goto unlock;
5623 }
5624
5625 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5626 if (!cmd)
5627 err = -ENOMEM;
5628 else
5629 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5630 read_local_oob_data_complete);
5631
5632 if (err < 0) {
5633 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5634 MGMT_STATUS_FAILED);
5635
5636 if (cmd)
5637 mgmt_pending_free(cmd);
5638 }
5639
5640 unlock:
5641 hci_dev_unlock(hdev);
5642 return err;
5643 }
5644
5645 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5646 void *data, u16 len)
5647 {
5648 struct mgmt_addr_info *addr = data;
5649 int err;
5650
5651 bt_dev_dbg(hdev, "sock %p", sk);
5652
5653 if (!bdaddr_type_is_valid(addr->type))
5654 return mgmt_cmd_complete(sk, hdev->id,
5655 MGMT_OP_ADD_REMOTE_OOB_DATA,
5656 MGMT_STATUS_INVALID_PARAMS,
5657 addr, sizeof(*addr));
5658
5659 hci_dev_lock(hdev);
5660
5661 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5662 struct mgmt_cp_add_remote_oob_data *cp = data;
5663 u8 status;
5664
5665 if (cp->addr.type != BDADDR_BREDR) {
5666 err = mgmt_cmd_complete(sk, hdev->id,
5667 MGMT_OP_ADD_REMOTE_OOB_DATA,
5668 MGMT_STATUS_INVALID_PARAMS,
5669 &cp->addr, sizeof(cp->addr));
5670 goto unlock;
5671 }
5672
5673 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5674 cp->addr.type, cp->hash,
5675 cp->rand, NULL, NULL);
5676 if (err < 0)
5677 status = MGMT_STATUS_FAILED;
5678 else
5679 status = MGMT_STATUS_SUCCESS;
5680
5681 err = mgmt_cmd_complete(sk, hdev->id,
5682 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5683 &cp->addr, sizeof(cp->addr));
5684 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5685 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5686 u8 *rand192, *hash192, *rand256, *hash256;
5687 u8 status;
5688
5689 if (bdaddr_type_is_le(cp->addr.type)) {
5690 /* Enforce zero-valued 192-bit parameters as
5691 * long as legacy SMP OOB isn't implemented.
5692 */
5693 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5694 memcmp(cp->hash192, ZERO_KEY, 16)) {
5695 err = mgmt_cmd_complete(sk, hdev->id,
5696 MGMT_OP_ADD_REMOTE_OOB_DATA,
5697 MGMT_STATUS_INVALID_PARAMS,
5698 addr, sizeof(*addr));
5699 goto unlock;
5700 }
5701
5702 rand192 = NULL;
5703 hash192 = NULL;
5704 } else {
5705 /* In case one of the P-192 values is set to zero,
5706 * then just disable OOB data for P-192.
5707 */
5708 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5709 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5710 rand192 = NULL;
5711 hash192 = NULL;
5712 } else {
5713 rand192 = cp->rand192;
5714 hash192 = cp->hash192;
5715 }
5716 }
5717
5718 /* In case one of the P-256 values is set to zero, then just
5719 * disable OOB data for P-256.
5720 */
5721 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5722 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5723 rand256 = NULL;
5724 hash256 = NULL;
5725 } else {
5726 rand256 = cp->rand256;
5727 hash256 = cp->hash256;
5728 }
5729
5730 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5731 cp->addr.type, hash192, rand192,
5732 hash256, rand256);
5733 if (err < 0)
5734 status = MGMT_STATUS_FAILED;
5735 else
5736 status = MGMT_STATUS_SUCCESS;
5737
5738 err = mgmt_cmd_complete(sk, hdev->id,
5739 MGMT_OP_ADD_REMOTE_OOB_DATA,
5740 status, &cp->addr, sizeof(cp->addr));
5741 } else {
5742 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5743 len);
5744 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5745 MGMT_STATUS_INVALID_PARAMS);
5746 }
5747
5748 unlock:
5749 hci_dev_unlock(hdev);
5750 return err;
5751 }
5752
5753 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5754 void *data, u16 len)
5755 {
5756 struct mgmt_cp_remove_remote_oob_data *cp = data;
5757 u8 status;
5758 int err;
5759
5760 bt_dev_dbg(hdev, "sock %p", sk);
5761
5762 if (cp->addr.type != BDADDR_BREDR)
5763 return mgmt_cmd_complete(sk, hdev->id,
5764 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5765 MGMT_STATUS_INVALID_PARAMS,
5766 &cp->addr, sizeof(cp->addr));
5767
5768 hci_dev_lock(hdev);
5769
5770 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5771 hci_remote_oob_data_clear(hdev);
5772 status = MGMT_STATUS_SUCCESS;
5773 goto done;
5774 }
5775
5776 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5777 if (err < 0)
5778 status = MGMT_STATUS_INVALID_PARAMS;
5779 else
5780 status = MGMT_STATUS_SUCCESS;
5781
5782 done:
5783 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5784 status, &cp->addr, sizeof(cp->addr));
5785
5786 hci_dev_unlock(hdev);
5787 return err;
5788 }
5789
5790 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5791 {
5792 struct mgmt_pending_cmd *cmd;
5793
5794 bt_dev_dbg(hdev, "status %u", status);
5795
5796 hci_dev_lock(hdev);
5797
5798 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5799 if (!cmd)
5800 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5801
5802 if (!cmd)
5803 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5804
5805 if (cmd) {
5806 cmd->cmd_complete(cmd, mgmt_status(status));
5807 mgmt_pending_remove(cmd);
5808 }
5809
5810 hci_dev_unlock(hdev);
5811 }
5812
5813 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5814 uint8_t *mgmt_status)
5815 {
5816 switch (type) {
5817 case DISCOV_TYPE_LE:
5818 *mgmt_status = mgmt_le_support(hdev);
5819 if (*mgmt_status)
5820 return false;
5821 break;
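/* Interleaved discovery needs both LE and BR/EDR support, so after
 * checking LE fall through to the BR/EDR check below.
 */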
5822 case DISCOV_TYPE_INTERLEAVED:
5823 *mgmt_status = mgmt_le_support(hdev);
5824 if (*mgmt_status)
5825 return false;
5826 fallthrough;
5827 case DISCOV_TYPE_BREDR:
5828 *mgmt_status = mgmt_bredr_support(hdev);
5829 if (*mgmt_status)
5830 return false;
5831 break;
5832 default:
5833 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5834 return false;
5835 }
5836
5837 return true;
5838 }
5839
5840 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5841 {
5842 struct mgmt_pending_cmd *cmd = data;
5843
5844 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5845 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5846 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5847 return;
5848
5849 bt_dev_dbg(hdev, "err %d", err);
5850
5851 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5852 cmd->param, 1);
5853 mgmt_pending_remove(cmd);
5854
5855 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5856 DISCOVERY_FINDING);
5857 }
5858
5859 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5860 {
5861 return hci_start_discovery_sync(hdev);
5862 }
5863
5864 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5865 u16 op, void *data, u16 len)
5866 {
5867 struct mgmt_cp_start_discovery *cp = data;
5868 struct mgmt_pending_cmd *cmd;
5869 u8 status;
5870 int err;
5871
5872 bt_dev_dbg(hdev, "sock %p", sk);
5873
5874 hci_dev_lock(hdev);
5875
5876 if (!hdev_is_powered(hdev)) {
5877 err = mgmt_cmd_complete(sk, hdev->id, op,
5878 MGMT_STATUS_NOT_POWERED,
5879 &cp->type, sizeof(cp->type));
5880 goto failed;
5881 }
5882
5883 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5884 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5885 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5886 &cp->type, sizeof(cp->type));
5887 goto failed;
5888 }
5889
5890 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5891 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5892 &cp->type, sizeof(cp->type));
5893 goto failed;
5894 }
5895
5896 /* Can't start discovery when it is paused */
5897 if (hdev->discovery_paused) {
5898 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5899 &cp->type, sizeof(cp->type));
5900 goto failed;
5901 }
5902
5903 /* Clear the discovery filter first to free any previously
5904 * allocated memory for the UUID list.
5905 */
5906 hci_discovery_filter_clear(hdev);
5907
5908 hdev->discovery.type = cp->type;
5909 hdev->discovery.report_invalid_rssi = false;
5910 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5911 hdev->discovery.limited = true;
5912 else
5913 hdev->discovery.limited = false;
5914
5915 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5916 if (!cmd) {
5917 err = -ENOMEM;
5918 goto failed;
5919 }
5920
5921 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5922 start_discovery_complete);
5923 if (err < 0) {
5924 mgmt_pending_remove(cmd);
5925 goto failed;
5926 }
5927
5928 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5929
5930 failed:
5931 hci_dev_unlock(hdev);
5932 return err;
5933 }
5934
5935 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5936 void *data, u16 len)
5937 {
5938 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5939 data, len);
5940 }
5941
5942 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5943 void *data, u16 len)
5944 {
5945 return start_discovery_internal(sk, hdev,
5946 MGMT_OP_START_LIMITED_DISCOVERY,
5947 data, len);
5948 }
5949
5950 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5951 void *data, u16 len)
5952 {
5953 struct mgmt_cp_start_service_discovery *cp = data;
5954 struct mgmt_pending_cmd *cmd;
5955 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5956 u16 uuid_count, expected_len;
5957 u8 status;
5958 int err;
5959
5960 bt_dev_dbg(hdev, "sock %p", sk);
5961
5962 hci_dev_lock(hdev);
5963
5964 if (!hdev_is_powered(hdev)) {
5965 err = mgmt_cmd_complete(sk, hdev->id,
5966 MGMT_OP_START_SERVICE_DISCOVERY,
5967 MGMT_STATUS_NOT_POWERED,
5968 &cp->type, sizeof(cp->type));
5969 goto failed;
5970 }
5971
5972 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5973 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5974 err = mgmt_cmd_complete(sk, hdev->id,
5975 MGMT_OP_START_SERVICE_DISCOVERY,
5976 MGMT_STATUS_BUSY, &cp->type,
5977 sizeof(cp->type));
5978 goto failed;
5979 }
5980
5981 if (hdev->discovery_paused) {
5982 err = mgmt_cmd_complete(sk, hdev->id,
5983 MGMT_OP_START_SERVICE_DISCOVERY,
5984 MGMT_STATUS_BUSY, &cp->type,
5985 sizeof(cp->type));
5986 goto failed;
5987 }
5988
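/* Each service UUID occupies 16 bytes (128 bits) in the command, so
 * max_uuid_count above bounds uuid_count such that the total command
 * length still fits into the u16 length field.
 */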
5989 uuid_count = __le16_to_cpu(cp->uuid_count);
5990 if (uuid_count > max_uuid_count) {
5991 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5992 uuid_count);
5993 err = mgmt_cmd_complete(sk, hdev->id,
5994 MGMT_OP_START_SERVICE_DISCOVERY,
5995 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5996 sizeof(cp->type));
5997 goto failed;
5998 }
5999
6000 expected_len = sizeof(*cp) + uuid_count * 16;
6001 if (expected_len != len) {
6002 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6003 expected_len, len);
6004 err = mgmt_cmd_complete(sk, hdev->id,
6005 MGMT_OP_START_SERVICE_DISCOVERY,
6006 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6007 sizeof(cp->type));
6008 goto failed;
6009 }
6010
6011 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6012 err = mgmt_cmd_complete(sk, hdev->id,
6013 MGMT_OP_START_SERVICE_DISCOVERY,
6014 status, &cp->type, sizeof(cp->type));
6015 goto failed;
6016 }
6017
6018 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6019 hdev, data, len);
6020 if (!cmd) {
6021 err = -ENOMEM;
6022 goto failed;
6023 }
6024
6025 /* Clear the discovery filter first to free any previously
6026 * allocated memory for the UUID list.
6027 */
6028 hci_discovery_filter_clear(hdev);
6029
6030 hdev->discovery.result_filtering = true;
6031 hdev->discovery.type = cp->type;
6032 hdev->discovery.rssi = cp->rssi;
6033 hdev->discovery.uuid_count = uuid_count;
6034
6035 if (uuid_count > 0) {
6036 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6037 GFP_KERNEL);
6038 if (!hdev->discovery.uuids) {
6039 err = mgmt_cmd_complete(sk, hdev->id,
6040 MGMT_OP_START_SERVICE_DISCOVERY,
6041 MGMT_STATUS_FAILED,
6042 &cp->type, sizeof(cp->type));
6043 mgmt_pending_remove(cmd);
6044 goto failed;
6045 }
6046 }
6047
6048 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6049 start_discovery_complete);
6050 if (err < 0) {
6051 mgmt_pending_remove(cmd);
6052 goto failed;
6053 }
6054
6055 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6056
6057 failed:
6058 hci_dev_unlock(hdev);
6059 return err;
6060 }
6061
6062 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6063 {
6064 struct mgmt_pending_cmd *cmd;
6065
6066 bt_dev_dbg(hdev, "status %u", status);
6067
6068 hci_dev_lock(hdev);
6069
6070 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6071 if (cmd) {
6072 cmd->cmd_complete(cmd, mgmt_status(status));
6073 mgmt_pending_remove(cmd);
6074 }
6075
6076 hci_dev_unlock(hdev);
6077 }
6078
6079 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6080 {
6081 struct mgmt_pending_cmd *cmd = data;
6082
6083 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6084 return;
6085
6086 bt_dev_dbg(hdev, "err %d", err);
6087
6088 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6089 cmd->param, 1);
6090 mgmt_pending_remove(cmd);
6091
6092 if (!err)
6093 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6094 }
6095
6096 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6097 {
6098 return hci_stop_discovery_sync(hdev);
6099 }
6100
6101 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6102 u16 len)
6103 {
6104 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6105 struct mgmt_pending_cmd *cmd;
6106 int err;
6107
6108 bt_dev_dbg(hdev, "sock %p", sk);
6109
6110 hci_dev_lock(hdev);
6111
6112 if (!hci_discovery_active(hdev)) {
6113 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6114 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6115 sizeof(mgmt_cp->type));
6116 goto unlock;
6117 }
6118
6119 if (hdev->discovery.type != mgmt_cp->type) {
6120 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6121 MGMT_STATUS_INVALID_PARAMS,
6122 &mgmt_cp->type, sizeof(mgmt_cp->type));
6123 goto unlock;
6124 }
6125
6126 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6127 if (!cmd) {
6128 err = -ENOMEM;
6129 goto unlock;
6130 }
6131
6132 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6133 stop_discovery_complete);
6134 if (err < 0) {
6135 mgmt_pending_remove(cmd);
6136 goto unlock;
6137 }
6138
6139 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6140
6141 unlock:
6142 hci_dev_unlock(hdev);
6143 return err;
6144 }
6145
6146 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6147 u16 len)
6148 {
6149 struct mgmt_cp_confirm_name *cp = data;
6150 struct inquiry_entry *e;
6151 int err;
6152
6153 bt_dev_dbg(hdev, "sock %p", sk);
6154
6155 hci_dev_lock(hdev);
6156
6157 if (!hci_discovery_active(hdev)) {
6158 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6159 MGMT_STATUS_FAILED, &cp->addr,
6160 sizeof(cp->addr));
6161 goto failed;
6162 }
6163
6164 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6165 if (!e) {
6166 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6167 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6168 sizeof(cp->addr));
6169 goto failed;
6170 }
6171
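/* A confirmed name removes the entry from the name-resolve list;
 * otherwise mark the entry so that name resolution is attempted again
 * for this device.
 */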
6172 if (cp->name_known) {
6173 e->name_state = NAME_KNOWN;
6174 list_del(&e->list);
6175 } else {
6176 e->name_state = NAME_NEEDED;
6177 hci_inquiry_cache_update_resolve(hdev, e);
6178 }
6179
6180 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6181 &cp->addr, sizeof(cp->addr));
6182
6183 failed:
6184 hci_dev_unlock(hdev);
6185 return err;
6186 }
6187
6188 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6189 u16 len)
6190 {
6191 struct mgmt_cp_block_device *cp = data;
6192 u8 status;
6193 int err;
6194
6195 bt_dev_dbg(hdev, "sock %p", sk);
6196
6197 if (!bdaddr_type_is_valid(cp->addr.type))
6198 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6199 MGMT_STATUS_INVALID_PARAMS,
6200 &cp->addr, sizeof(cp->addr));
6201
6202 hci_dev_lock(hdev);
6203
6204 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6205 cp->addr.type);
6206 if (err < 0) {
6207 status = MGMT_STATUS_FAILED;
6208 goto done;
6209 }
6210
6211 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6212 sk);
6213 status = MGMT_STATUS_SUCCESS;
6214
6215 done:
6216 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6217 &cp->addr, sizeof(cp->addr));
6218
6219 hci_dev_unlock(hdev);
6220
6221 return err;
6222 }
6223
6224 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6225 u16 len)
6226 {
6227 struct mgmt_cp_unblock_device *cp = data;
6228 u8 status;
6229 int err;
6230
6231 bt_dev_dbg(hdev, "sock %p", sk);
6232
6233 if (!bdaddr_type_is_valid(cp->addr.type))
6234 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6235 MGMT_STATUS_INVALID_PARAMS,
6236 &cp->addr, sizeof(cp->addr));
6237
6238 hci_dev_lock(hdev);
6239
6240 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6241 cp->addr.type);
6242 if (err < 0) {
6243 status = MGMT_STATUS_INVALID_PARAMS;
6244 goto done;
6245 }
6246
6247 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6248 sk);
6249 status = MGMT_STATUS_SUCCESS;
6250
6251 done:
6252 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6253 &cp->addr, sizeof(cp->addr));
6254
6255 hci_dev_unlock(hdev);
6256
6257 return err;
6258 }
6259
6260 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6261 {
6262 return hci_update_eir_sync(hdev);
6263 }
6264
6265 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6266 u16 len)
6267 {
6268 struct mgmt_cp_set_device_id *cp = data;
6269 int err;
6270 __u16 source;
6271
6272 bt_dev_dbg(hdev, "sock %p", sk);
6273
6274 source = __le16_to_cpu(cp->source);
6275
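/* Valid Device ID sources are 0x0000 (disabled), 0x0001 (Bluetooth SIG
 * assigned) and 0x0002 (USB Implementer's Forum assigned).
 */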
6276 if (source > 0x0002)
6277 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6278 MGMT_STATUS_INVALID_PARAMS);
6279
6280 hci_dev_lock(hdev);
6281
6282 hdev->devid_source = source;
6283 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6284 hdev->devid_product = __le16_to_cpu(cp->product);
6285 hdev->devid_version = __le16_to_cpu(cp->version);
6286
6287 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6288 NULL, 0);
6289
6290 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6291
6292 hci_dev_unlock(hdev);
6293
6294 return err;
6295 }
6296
6297 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6298 {
6299 if (err)
6300 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6301 else
6302 bt_dev_dbg(hdev, "status %d", err);
6303 }
6304
6305 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6306 {
6307 struct cmd_lookup match = { NULL, hdev };
6308 u8 instance;
6309 struct adv_info *adv_instance;
6310 u8 status = mgmt_status(err);
6311
6312 if (status) {
6313 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6314 cmd_status_rsp, &status);
6315 return;
6316 }
6317
6318 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6319 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6320 else
6321 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6322
6323 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6324 &match);
6325
6326 new_settings(hdev, match.sk);
6327
6328 if (match.sk)
6329 sock_put(match.sk);
6330
6331 /* If "Set Advertising" was just disabled and instance advertising was
6332 * set up earlier, then re-enable multi-instance advertising.
6333 */
6334 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6335 list_empty(&hdev->adv_instances))
6336 return;
6337
6338 instance = hdev->cur_adv_instance;
6339 if (!instance) {
6340 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6341 struct adv_info, list);
6342 if (!adv_instance)
6343 return;
6344
6345 instance = adv_instance->instance;
6346 }
6347
6348 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6349
6350 enable_advertising_instance(hdev, err);
6351 }
6352
6353 static int set_adv_sync(struct hci_dev *hdev, void *data)
6354 {
6355 struct mgmt_pending_cmd *cmd = data;
6356 struct mgmt_mode *cp = cmd->param;
6357 u8 val = !!cp->val;
6358
6359 if (cp->val == 0x02)
6360 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6361 else
6362 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6363
6364 cancel_adv_timeout(hdev);
6365
6366 if (val) {
6367 /* Switch to instance "0" for the Set Advertising setting.
6368 * We cannot use update_[adv|scan_rsp]_data() here as the
6369 * HCI_ADVERTISING flag is not yet set.
6370 */
6371 hdev->cur_adv_instance = 0x00;
6372
6373 if (ext_adv_capable(hdev)) {
6374 hci_start_ext_adv_sync(hdev, 0x00);
6375 } else {
6376 hci_update_adv_data_sync(hdev, 0x00);
6377 hci_update_scan_rsp_data_sync(hdev, 0x00);
6378 hci_enable_advertising_sync(hdev);
6379 }
6380 } else {
6381 hci_disable_advertising_sync(hdev);
6382 }
6383
6384 return 0;
6385 }
6386
6387 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6388 u16 len)
6389 {
6390 struct mgmt_mode *cp = data;
6391 struct mgmt_pending_cmd *cmd;
6392 u8 val, status;
6393 int err;
6394
6395 bt_dev_dbg(hdev, "sock %p", sk);
6396
6397 status = mgmt_le_support(hdev);
6398 if (status)
6399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6400 status);
6401
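/* 0x00 disables advertising, 0x01 enables it and 0x02 enables
 * connectable advertising.
 */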
6402 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6403 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6404 MGMT_STATUS_INVALID_PARAMS);
6405
6406 if (hdev->advertising_paused)
6407 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6408 MGMT_STATUS_BUSY);
6409
6410 hci_dev_lock(hdev);
6411
6412 val = !!cp->val;
6413
6414 /* The following conditions are ones which mean that we should
6415 * not do any HCI communication but directly send a mgmt
6416 * response to user space (after toggling the flag if
6417 * necessary).
6418 */
6419 if (!hdev_is_powered(hdev) ||
6420 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6421 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6422 hci_dev_test_flag(hdev, HCI_MESH) ||
6423 hci_conn_num(hdev, LE_LINK) > 0 ||
6424 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6425 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6426 bool changed;
6427
6428 if (cp->val) {
6429 hdev->cur_adv_instance = 0x00;
6430 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6431 if (cp->val == 0x02)
6432 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6433 else
6434 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6435 } else {
6436 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6437 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6438 }
6439
6440 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6441 if (err < 0)
6442 goto unlock;
6443
6444 if (changed)
6445 err = new_settings(hdev, sk);
6446
6447 goto unlock;
6448 }
6449
6450 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6451 pending_find(MGMT_OP_SET_LE, hdev)) {
6452 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6453 MGMT_STATUS_BUSY);
6454 goto unlock;
6455 }
6456
6457 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6458 if (!cmd)
6459 err = -ENOMEM;
6460 else
6461 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6462 set_advertising_complete);
6463
6464 if (err < 0 && cmd)
6465 mgmt_pending_remove(cmd);
6466
6467 unlock:
6468 hci_dev_unlock(hdev);
6469 return err;
6470 }
6471
6472 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6473 void *data, u16 len)
6474 {
6475 struct mgmt_cp_set_static_address *cp = data;
6476 int err;
6477
6478 bt_dev_dbg(hdev, "sock %p", sk);
6479
6480 if (!lmp_le_capable(hdev))
6481 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6482 MGMT_STATUS_NOT_SUPPORTED);
6483
6484 if (hdev_is_powered(hdev))
6485 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6486 MGMT_STATUS_REJECTED);
6487
6488 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6489 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6490 return mgmt_cmd_status(sk, hdev->id,
6491 MGMT_OP_SET_STATIC_ADDRESS,
6492 MGMT_STATUS_INVALID_PARAMS);
6493
6494 /* Two most significant bits shall be set */
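/* e.g. C4:xx:xx:xx:xx:xx is a valid static address while 7B:xx:...
 * is not; bdaddr_t is stored little-endian, so b[5] holds the most
 * significant byte.
 */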
6495 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6496 return mgmt_cmd_status(sk, hdev->id,
6497 MGMT_OP_SET_STATIC_ADDRESS,
6498 MGMT_STATUS_INVALID_PARAMS);
6499 }
6500
6501 hci_dev_lock(hdev);
6502
6503 bacpy(&hdev->static_addr, &cp->bdaddr);
6504
6505 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6506 if (err < 0)
6507 goto unlock;
6508
6509 err = new_settings(hdev, sk);
6510
6511 unlock:
6512 hci_dev_unlock(hdev);
6513 return err;
6514 }
6515
6516 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6517 void *data, u16 len)
6518 {
6519 struct mgmt_cp_set_scan_params *cp = data;
6520 __u16 interval, window;
6521 int err;
6522
6523 bt_dev_dbg(hdev, "sock %p", sk);
6524
6525 if (!lmp_le_capable(hdev))
6526 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6527 MGMT_STATUS_NOT_SUPPORTED);
6528
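/* Interval and window are in units of 0.625 ms, so the allowed range
 * 0x0004-0x4000 corresponds to 2.5 ms - 10.24 s (e.g. 0x0010 means
 * 16 * 0.625 ms = 10 ms).
 */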
6529 interval = __le16_to_cpu(cp->interval);
6530
6531 if (interval < 0x0004 || interval > 0x4000)
6532 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6533 MGMT_STATUS_INVALID_PARAMS);
6534
6535 window = __le16_to_cpu(cp->window);
6536
6537 if (window < 0x0004 || window > 0x4000)
6538 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6539 MGMT_STATUS_INVALID_PARAMS);
6540
6541 if (window > interval)
6542 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6543 MGMT_STATUS_INVALID_PARAMS);
6544
6545 hci_dev_lock(hdev);
6546
6547 hdev->le_scan_interval = interval;
6548 hdev->le_scan_window = window;
6549
6550 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6551 NULL, 0);
6552
6553 /* If background scan is running, restart it so new parameters are
6554 * loaded.
6555 */
6556 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6557 hdev->discovery.state == DISCOVERY_STOPPED)
6558 hci_update_passive_scan(hdev);
6559
6560 hci_dev_unlock(hdev);
6561
6562 return err;
6563 }
6564
6565 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6566 {
6567 struct mgmt_pending_cmd *cmd = data;
6568
6569 bt_dev_dbg(hdev, "err %d", err);
6570
6571 if (err) {
6572 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6573 mgmt_status(err));
6574 } else {
6575 struct mgmt_mode *cp = cmd->param;
6576
6577 if (cp->val)
6578 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6579 else
6580 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6581
6582 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6583 new_settings(hdev, cmd->sk);
6584 }
6585
6586 mgmt_pending_free(cmd);
6587 }
6588
6589 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6590 {
6591 struct mgmt_pending_cmd *cmd = data;
6592 struct mgmt_mode *cp = cmd->param;
6593
6594 return hci_write_fast_connectable_sync(hdev, cp->val);
6595 }
6596
6597 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6598 void *data, u16 len)
6599 {
6600 struct mgmt_mode *cp = data;
6601 struct mgmt_pending_cmd *cmd;
6602 int err;
6603
6604 bt_dev_dbg(hdev, "sock %p", sk);
6605
6606 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6607 hdev->hci_ver < BLUETOOTH_VER_1_2)
6608 return mgmt_cmd_status(sk, hdev->id,
6609 MGMT_OP_SET_FAST_CONNECTABLE,
6610 MGMT_STATUS_NOT_SUPPORTED);
6611
6612 if (cp->val != 0x00 && cp->val != 0x01)
6613 return mgmt_cmd_status(sk, hdev->id,
6614 MGMT_OP_SET_FAST_CONNECTABLE,
6615 MGMT_STATUS_INVALID_PARAMS);
6616
6617 hci_dev_lock(hdev);
6618
6619 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6620 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6621 goto unlock;
6622 }
6623
6624 if (!hdev_is_powered(hdev)) {
6625 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6626 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6627 new_settings(hdev, sk);
6628 goto unlock;
6629 }
6630
6631 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6632 len);
6633 if (!cmd)
6634 err = -ENOMEM;
6635 else
6636 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6637 fast_connectable_complete);
6638
6639 if (err < 0) {
6640 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6641 MGMT_STATUS_FAILED);
6642
6643 if (cmd)
6644 mgmt_pending_free(cmd);
6645 }
6646
6647 unlock:
6648 hci_dev_unlock(hdev);
6649
6650 return err;
6651 }
6652
6653 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6654 {
6655 struct mgmt_pending_cmd *cmd = data;
6656
6657 bt_dev_dbg(hdev, "err %d", err);
6658
6659 if (err) {
6660 u8 mgmt_err = mgmt_status(err);
6661
6662 /* We need to restore the flag if related HCI commands
6663 * failed.
6664 */
6665 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6666
6667 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6668 } else {
6669 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6670 new_settings(hdev, cmd->sk);
6671 }
6672
6673 mgmt_pending_free(cmd);
6674 }
6675
6676 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6677 {
6678 int status;
6679
6680 status = hci_write_fast_connectable_sync(hdev, false);
6681
6682 if (!status)
6683 status = hci_update_scan_sync(hdev);
6684
6685 /* Since only the advertising data flags will change, there
6686 * is no need to update the scan response data.
6687 */
6688 if (!status)
6689 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6690
6691 return status;
6692 }
6693
6694 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6695 {
6696 struct mgmt_mode *cp = data;
6697 struct mgmt_pending_cmd *cmd;
6698 int err;
6699
6700 bt_dev_dbg(hdev, "sock %p", sk);
6701
6702 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6703 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6704 MGMT_STATUS_NOT_SUPPORTED);
6705
6706 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6707 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6708 MGMT_STATUS_REJECTED);
6709
6710 if (cp->val != 0x00 && cp->val != 0x01)
6711 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6712 MGMT_STATUS_INVALID_PARAMS);
6713
6714 hci_dev_lock(hdev);
6715
6716 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6717 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6718 goto unlock;
6719 }
6720
6721 if (!hdev_is_powered(hdev)) {
6722 if (!cp->val) {
6723 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6724 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6725 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6726 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6727 }
6728
6729 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6730
6731 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6732 if (err < 0)
6733 goto unlock;
6734
6735 err = new_settings(hdev, sk);
6736 goto unlock;
6737 }
6738
6739 /* Reject disabling when powered on */
6740 if (!cp->val) {
6741 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6742 MGMT_STATUS_REJECTED);
6743 goto unlock;
6744 } else {
6745 /* When configuring a dual-mode controller to operate
6746 * with LE only and using a static address, then switching
6747 * BR/EDR back on is not allowed.
6748 *
6749 * Dual-mode controllers shall operate with the public
6750 * address as its identity address for BR/EDR and LE. So
6751 * reject the attempt to create an invalid configuration.
6752 *
6753 * The same restrictions applies when secure connections
6754 * has been enabled. For BR/EDR this is a controller feature
6755 * while for LE it is a host stack feature. This means that
6756 * switching BR/EDR back on when secure connections has been
6757 * enabled is not a supported transaction.
6758 */
6759 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6760 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6761 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6762 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6763 MGMT_STATUS_REJECTED);
6764 goto unlock;
6765 }
6766 }
6767
6768 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6769 if (!cmd)
6770 err = -ENOMEM;
6771 else
6772 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6773 set_bredr_complete);
6774
6775 if (err < 0) {
6776 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6777 MGMT_STATUS_FAILED);
6778 if (cmd)
6779 mgmt_pending_free(cmd);
6780
6781 goto unlock;
6782 }
6783
6784 /* We need to flip the bit already here so that
6785 * hci_req_update_adv_data generates the correct flags.
6786 */
6787 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6788
6789 unlock:
6790 hci_dev_unlock(hdev);
6791 return err;
6792 }
6793
6794 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6795 {
6796 struct mgmt_pending_cmd *cmd = data;
6797 struct mgmt_mode *cp;
6798
6799 bt_dev_dbg(hdev, "err %d", err);
6800
6801 if (err) {
6802 u8 mgmt_err = mgmt_status(err);
6803
6804 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6805 goto done;
6806 }
6807
6808 cp = cmd->param;
6809
6810 switch (cp->val) {
6811 case 0x00:
6812 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6813 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6814 break;
6815 case 0x01:
6816 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6817 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6818 break;
6819 case 0x02:
6820 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6821 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6822 break;
6823 }
6824
6825 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6826 new_settings(hdev, cmd->sk);
6827
6828 done:
6829 mgmt_pending_free(cmd);
6830 }
6831
6832 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6833 {
6834 struct mgmt_pending_cmd *cmd = data;
6835 struct mgmt_mode *cp = cmd->param;
6836 u8 val = !!cp->val;
6837
6838 /* Force write of val */
6839 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6840
6841 return hci_write_sc_support_sync(hdev, val);
6842 }
6843
6844 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6845 void *data, u16 len)
6846 {
6847 struct mgmt_mode *cp = data;
6848 struct mgmt_pending_cmd *cmd;
6849 u8 val;
6850 int err;
6851
6852 bt_dev_dbg(hdev, "sock %p", sk);
6853
6854 if (!lmp_sc_capable(hdev) &&
6855 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6856 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6857 MGMT_STATUS_NOT_SUPPORTED);
6858
6859 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6860 lmp_sc_capable(hdev) &&
6861 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6862 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6863 MGMT_STATUS_REJECTED);
6864
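/* 0x00 disables Secure Connections, 0x01 enables it and 0x02 enables
 * SC Only mode, in which legacy pairing is rejected.
 */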
6865 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6866 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6867 MGMT_STATUS_INVALID_PARAMS);
6868
6869 hci_dev_lock(hdev);
6870
6871 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6872 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6873 bool changed;
6874
6875 if (cp->val) {
6876 changed = !hci_dev_test_and_set_flag(hdev,
6877 HCI_SC_ENABLED);
6878 if (cp->val == 0x02)
6879 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6880 else
6881 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6882 } else {
6883 changed = hci_dev_test_and_clear_flag(hdev,
6884 HCI_SC_ENABLED);
6885 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6886 }
6887
6888 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6889 if (err < 0)
6890 goto failed;
6891
6892 if (changed)
6893 err = new_settings(hdev, sk);
6894
6895 goto failed;
6896 }
6897
6898 val = !!cp->val;
6899
6900 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6901 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6902 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6903 goto failed;
6904 }
6905
6906 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6907 if (!cmd)
6908 err = -ENOMEM;
6909 else
6910 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6911 set_secure_conn_complete);
6912
6913 if (err < 0) {
6914 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6915 MGMT_STATUS_FAILED);
6916 if (cmd)
6917 mgmt_pending_free(cmd);
6918 }
6919
6920 failed:
6921 hci_dev_unlock(hdev);
6922 return err;
6923 }
6924
6925 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6926 void *data, u16 len)
6927 {
6928 struct mgmt_mode *cp = data;
6929 bool changed, use_changed;
6930 int err;
6931
6932 bt_dev_dbg(hdev, "sock %p", sk);
6933
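/* 0x00 discards debug keys on disconnect, 0x01 keeps them stored and
 * 0x02 additionally puts the controller into SSP debug mode so that
 * it generates debug keys itself.
 */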
6934 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6935 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6936 MGMT_STATUS_INVALID_PARAMS);
6937
6938 hci_dev_lock(hdev);
6939
6940 if (cp->val)
6941 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6942 else
6943 changed = hci_dev_test_and_clear_flag(hdev,
6944 HCI_KEEP_DEBUG_KEYS);
6945
6946 if (cp->val == 0x02)
6947 use_changed = !hci_dev_test_and_set_flag(hdev,
6948 HCI_USE_DEBUG_KEYS);
6949 else
6950 use_changed = hci_dev_test_and_clear_flag(hdev,
6951 HCI_USE_DEBUG_KEYS);
6952
6953 if (hdev_is_powered(hdev) && use_changed &&
6954 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6955 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6956 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6957 sizeof(mode), &mode);
6958 }
6959
6960 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6961 if (err < 0)
6962 goto unlock;
6963
6964 if (changed)
6965 err = new_settings(hdev, sk);
6966
6967 unlock:
6968 hci_dev_unlock(hdev);
6969 return err;
6970 }
6971
6972 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6973 u16 len)
6974 {
6975 struct mgmt_cp_set_privacy *cp = cp_data;
6976 bool changed;
6977 int err;
6978
6979 bt_dev_dbg(hdev, "sock %p", sk);
6980
6981 if (!lmp_le_capable(hdev))
6982 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6983 MGMT_STATUS_NOT_SUPPORTED);
6984
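/* 0x00 disables privacy, 0x01 enables use of a resolvable private
 * address and 0x02 selects limited privacy, which relaxes RPA usage
 * while the device is deliberately discoverable.
 */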
6985 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6986 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6987 MGMT_STATUS_INVALID_PARAMS);
6988
6989 if (hdev_is_powered(hdev))
6990 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6991 MGMT_STATUS_REJECTED);
6992
6993 hci_dev_lock(hdev);
6994
6995 /* If user space supports this command it is also expected to
6996 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6997 */
6998 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6999
7000 if (cp->privacy) {
7001 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7002 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7003 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7004 hci_adv_instances_set_rpa_expired(hdev, true);
7005 if (cp->privacy == 0x02)
7006 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7007 else
7008 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7009 } else {
7010 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7011 memset(hdev->irk, 0, sizeof(hdev->irk));
7012 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7013 hci_adv_instances_set_rpa_expired(hdev, false);
7014 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7015 }
7016
7017 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7018 if (err < 0)
7019 goto unlock;
7020
7021 if (changed)
7022 err = new_settings(hdev, sk);
7023
7024 unlock:
7025 hci_dev_unlock(hdev);
7026 return err;
7027 }
7028
7029 static bool irk_is_valid(struct mgmt_irk_info *irk)
7030 {
7031 switch (irk->addr.type) {
7032 case BDADDR_LE_PUBLIC:
7033 return true;
7034
7035 case BDADDR_LE_RANDOM:
7036 /* Two most significant bits shall be set */
7037 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7038 return false;
7039 return true;
7040 }
7041
7042 return false;
7043 }
7044
7045 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7046 u16 len)
7047 {
7048 struct mgmt_cp_load_irks *cp = cp_data;
7049 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7050 sizeof(struct mgmt_irk_info));
7051 u16 irk_count, expected_len;
7052 int i, err;
7053
7054 bt_dev_dbg(hdev, "sock %p", sk);
7055
7056 if (!lmp_le_capable(hdev))
7057 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7058 MGMT_STATUS_NOT_SUPPORTED);
7059
7060 irk_count = __le16_to_cpu(cp->irk_count);
7061 if (irk_count > max_irk_count) {
7062 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7063 irk_count);
7064 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7065 MGMT_STATUS_INVALID_PARAMS);
7066 }
7067
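/* struct_size() computes sizeof(*cp) + irk_count * sizeof(cp->irks[0])
 * with overflow checking; the result must match the received length
 * exactly.
 */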
7068 expected_len = struct_size(cp, irks, irk_count);
7069 if (expected_len != len) {
7070 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7071 expected_len, len);
7072 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7073 MGMT_STATUS_INVALID_PARAMS);
7074 }
7075
7076 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7077
7078 for (i = 0; i < irk_count; i++) {
7079 struct mgmt_irk_info *key = &cp->irks[i];
7080
7081 if (!irk_is_valid(key))
7082 return mgmt_cmd_status(sk, hdev->id,
7083 MGMT_OP_LOAD_IRKS,
7084 MGMT_STATUS_INVALID_PARAMS);
7085 }
7086
7087 hci_dev_lock(hdev);
7088
7089 hci_smp_irks_clear(hdev);
7090
7091 for (i = 0; i < irk_count; i++) {
7092 struct mgmt_irk_info *irk = &cp->irks[i];
7093
7094 if (hci_is_blocked_key(hdev,
7095 HCI_BLOCKED_KEY_TYPE_IRK,
7096 irk->val)) {
7097 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7098 &irk->addr.bdaddr);
7099 continue;
7100 }
7101
7102 hci_add_irk(hdev, &irk->addr.bdaddr,
7103 le_addr_type(irk->addr.type), irk->val,
7104 BDADDR_ANY);
7105 }
7106
7107 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7108
7109 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7110
7111 hci_dev_unlock(hdev);
7112
7113 return err;
7114 }
7115
7116 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7117 {
7118 if (key->initiator != 0x00 && key->initiator != 0x01)
7119 return false;
7120
7121 switch (key->addr.type) {
7122 case BDADDR_LE_PUBLIC:
7123 return true;
7124
7125 case BDADDR_LE_RANDOM:
7126 /* Two most significant bits shall be set */
7127 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7128 return false;
7129 return true;
7130 }
7131
7132 return false;
7133 }
7134
7135 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7136 void *cp_data, u16 len)
7137 {
7138 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7139 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7140 sizeof(struct mgmt_ltk_info));
7141 u16 key_count, expected_len;
7142 int i, err;
7143
7144 bt_dev_dbg(hdev, "sock %p", sk);
7145
7146 if (!lmp_le_capable(hdev))
7147 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7148 MGMT_STATUS_NOT_SUPPORTED);
7149
7150 key_count = __le16_to_cpu(cp->key_count);
7151 if (key_count > max_key_count) {
7152 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7153 key_count);
7154 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7155 MGMT_STATUS_INVALID_PARAMS);
7156 }
7157
7158 expected_len = struct_size(cp, keys, key_count);
7159 if (expected_len != len) {
7160 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7161 expected_len, len);
7162 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7163 MGMT_STATUS_INVALID_PARAMS);
7164 }
7165
7166 bt_dev_dbg(hdev, "key_count %u", key_count);
7167
7168 hci_dev_lock(hdev);
7169
7170 hci_smp_ltks_clear(hdev);
7171
7172 for (i = 0; i < key_count; i++) {
7173 struct mgmt_ltk_info *key = &cp->keys[i];
7174 u8 type, authenticated;
7175
7176 if (hci_is_blocked_key(hdev,
7177 HCI_BLOCKED_KEY_TYPE_LTK,
7178 key->val)) {
7179 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7180 &key->addr.bdaddr);
7181 continue;
7182 }
7183
7184 if (!ltk_is_valid(key)) {
7185 bt_dev_warn(hdev, "Invalid LTK for %pMR",
7186 &key->addr.bdaddr);
7187 continue;
7188 }
7189
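/* Map the mgmt key type onto the SMP key type: legacy keys track which
 * side initiated pairing, P-256 keys do not, and debug keys are never
 * loaded (the fallthrough below skips them).
 */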
7190 switch (key->type) {
7191 case MGMT_LTK_UNAUTHENTICATED:
7192 authenticated = 0x00;
7193 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7194 break;
7195 case MGMT_LTK_AUTHENTICATED:
7196 authenticated = 0x01;
7197 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7198 break;
7199 case MGMT_LTK_P256_UNAUTH:
7200 authenticated = 0x00;
7201 type = SMP_LTK_P256;
7202 break;
7203 case MGMT_LTK_P256_AUTH:
7204 authenticated = 0x01;
7205 type = SMP_LTK_P256;
7206 break;
7207 case MGMT_LTK_P256_DEBUG:
7208 authenticated = 0x00;
7209 type = SMP_LTK_P256_DEBUG;
7210 fallthrough;
7211 default:
7212 continue;
7213 }
7214
7215 hci_add_ltk(hdev, &key->addr.bdaddr,
7216 le_addr_type(key->addr.type), type, authenticated,
7217 key->val, key->enc_size, key->ediv, key->rand);
7218 }
7219
7220 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7221 NULL, 0);
7222
7223 hci_dev_unlock(hdev);
7224
7225 return err;
7226 }
7227
7228 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7229 {
7230 struct mgmt_pending_cmd *cmd = data;
7231 struct hci_conn *conn = cmd->user_data;
7232 struct mgmt_cp_get_conn_info *cp = cmd->param;
7233 struct mgmt_rp_get_conn_info rp;
7234 u8 status;
7235
7236 bt_dev_dbg(hdev, "err %d", err);
7237
7238 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7239
7240 status = mgmt_status(err);
7241 if (status == MGMT_STATUS_SUCCESS) {
7242 rp.rssi = conn->rssi;
7243 rp.tx_power = conn->tx_power;
7244 rp.max_tx_power = conn->max_tx_power;
7245 } else {
7246 rp.rssi = HCI_RSSI_INVALID;
7247 rp.tx_power = HCI_TX_POWER_INVALID;
7248 rp.max_tx_power = HCI_TX_POWER_INVALID;
7249 }
7250
7251 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7252 &rp, sizeof(rp));
7253
7254 mgmt_pending_free(cmd);
7255 }
7256
7257 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7258 {
7259 struct mgmt_pending_cmd *cmd = data;
7260 struct mgmt_cp_get_conn_info *cp = cmd->param;
7261 struct hci_conn *conn;
7262 int err;
7263 __le16 handle;
7264
7265 /* Make sure we are still connected */
7266 if (cp->addr.type == BDADDR_BREDR)
7267 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7268 &cp->addr.bdaddr);
7269 else
7270 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7271
7272 if (!conn || conn->state != BT_CONNECTED)
7273 return MGMT_STATUS_NOT_CONNECTED;
7274
7275 cmd->user_data = conn;
7276 handle = cpu_to_le16(conn->handle);
7277
7278 /* Refresh RSSI each time */
7279 err = hci_read_rssi_sync(hdev, handle);
7280
7281 /* For LE links TX power does not change thus we don't need to
7282 * query for it once value is known.
7283 */
7284 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7285 conn->tx_power == HCI_TX_POWER_INVALID))
7286 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7287
7288 /* Max TX power needs to be read only once per connection */
7289 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7290 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7291
7292 return err;
7293 }
7294
7295 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7296 u16 len)
7297 {
7298 struct mgmt_cp_get_conn_info *cp = data;
7299 struct mgmt_rp_get_conn_info rp;
7300 struct hci_conn *conn;
7301 unsigned long conn_info_age;
7302 int err = 0;
7303
7304 bt_dev_dbg(hdev, "sock %p", sk);
7305
7306 memset(&rp, 0, sizeof(rp));
7307 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7308 rp.addr.type = cp->addr.type;
7309
7310 if (!bdaddr_type_is_valid(cp->addr.type))
7311 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7312 MGMT_STATUS_INVALID_PARAMS,
7313 &rp, sizeof(rp));
7314
7315 hci_dev_lock(hdev);
7316
7317 if (!hdev_is_powered(hdev)) {
7318 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7319 MGMT_STATUS_NOT_POWERED, &rp,
7320 sizeof(rp));
7321 goto unlock;
7322 }
7323
7324 if (cp->addr.type == BDADDR_BREDR)
7325 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7326 &cp->addr.bdaddr);
7327 else
7328 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7329
7330 if (!conn || conn->state != BT_CONNECTED) {
7331 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7332 MGMT_STATUS_NOT_CONNECTED, &rp,
7333 sizeof(rp));
7334 goto unlock;
7335 }
7336
7337 /* To avoid client trying to guess when to poll again for information we
7338 * calculate conn info age as random value between min/max set in hdev.
7339 */
7340 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7341 hdev->conn_info_max_age - 1);
7342
7343 /* Query controller to refresh cached values if they are too old or were
7344 * never read.
7345 */
7346 if (time_after(jiffies, conn->conn_info_timestamp +
7347 msecs_to_jiffies(conn_info_age)) ||
7348 !conn->conn_info_timestamp) {
7349 struct mgmt_pending_cmd *cmd;
7350
7351 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7352 len);
7353 if (!cmd) {
7354 err = -ENOMEM;
7355 } else {
7356 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7357 cmd, get_conn_info_complete);
7358 }
7359
7360 if (err < 0) {
7361 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7362 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7363
7364 if (cmd)
7365 mgmt_pending_free(cmd);
7366
7367 goto unlock;
7368 }
7369
7370 conn->conn_info_timestamp = jiffies;
7371 } else {
7372 /* Cache is valid, just reply with values cached in hci_conn */
7373 rp.rssi = conn->rssi;
7374 rp.tx_power = conn->tx_power;
7375 rp.max_tx_power = conn->max_tx_power;
7376
7377 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7378 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7379 }
7380
7381 unlock:
7382 hci_dev_unlock(hdev);
7383 return err;
7384 }
7385
7386 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7387 {
7388 struct mgmt_pending_cmd *cmd = data;
7389 struct mgmt_cp_get_clock_info *cp = cmd->param;
7390 struct mgmt_rp_get_clock_info rp;
7391 struct hci_conn *conn = cmd->user_data;
7392 u8 status = mgmt_status(err);
7393
7394 bt_dev_dbg(hdev, "err %d", err);
7395
7396 memset(&rp, 0, sizeof(rp));
7397 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7398 rp.addr.type = cp->addr.type;
7399
7400 if (err)
7401 goto complete;
7402
7403 rp.local_clock = cpu_to_le32(hdev->clock);
7404
7405 if (conn) {
7406 rp.piconet_clock = cpu_to_le32(conn->clock);
7407 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7408 }
7409
7410 complete:
7411 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7412 sizeof(rp));
7413
7414 mgmt_pending_free(cmd);
7415 }
7416
7417 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7418 {
7419 struct mgmt_pending_cmd *cmd = data;
7420 struct mgmt_cp_get_clock_info *cp = cmd->param;
7421 struct hci_cp_read_clock hci_cp;
7422 struct hci_conn *conn;
7423
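/* A zeroed request (handle 0, which 0x00) reads the local clock; the
 * piconet clock of the connection is read further below.
 */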
7424 memset(&hci_cp, 0, sizeof(hci_cp));
7425 hci_read_clock_sync(hdev, &hci_cp);
7426
7427 /* Make sure connection still exists */
7428 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7429 if (!conn || conn->state != BT_CONNECTED)
7430 return MGMT_STATUS_NOT_CONNECTED;
7431
7432 cmd->user_data = conn;
7433 hci_cp.handle = cpu_to_le16(conn->handle);
7434 hci_cp.which = 0x01; /* Piconet clock */
7435
7436 return hci_read_clock_sync(hdev, &hci_cp);
7437 }
7438
7439 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7440 u16 len)
7441 {
7442 struct mgmt_cp_get_clock_info *cp = data;
7443 struct mgmt_rp_get_clock_info rp;
7444 struct mgmt_pending_cmd *cmd;
7445 struct hci_conn *conn;
7446 int err;
7447
7448 bt_dev_dbg(hdev, "sock %p", sk);
7449
7450 memset(&rp, 0, sizeof(rp));
7451 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7452 rp.addr.type = cp->addr.type;
7453
7454 if (cp->addr.type != BDADDR_BREDR)
7455 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7456 MGMT_STATUS_INVALID_PARAMS,
7457 &rp, sizeof(rp));
7458
7459 hci_dev_lock(hdev);
7460
7461 if (!hdev_is_powered(hdev)) {
7462 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7463 MGMT_STATUS_NOT_POWERED, &rp,
7464 sizeof(rp));
7465 goto unlock;
7466 }
7467
7468 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7469 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7470 &cp->addr.bdaddr);
7471 if (!conn || conn->state != BT_CONNECTED) {
7472 err = mgmt_cmd_complete(sk, hdev->id,
7473 MGMT_OP_GET_CLOCK_INFO,
7474 MGMT_STATUS_NOT_CONNECTED,
7475 &rp, sizeof(rp));
7476 goto unlock;
7477 }
7478 } else {
7479 conn = NULL;
7480 }
7481
7482 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7483 if (!cmd)
7484 err = -ENOMEM;
7485 else
7486 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7487 get_clock_info_complete);
7488
7489 if (err < 0) {
7490 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7491 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7492
7493 if (cmd)
7494 mgmt_pending_free(cmd);
7495 }
7496
7498 unlock:
7499 hci_dev_unlock(hdev);
7500 return err;
7501 }
7502
7503 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7504 {
7505 struct hci_conn *conn;
7506
7507 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7508 if (!conn)
7509 return false;
7510
7511 if (conn->dst_type != type)
7512 return false;
7513
7514 if (conn->state != BT_CONNECTED)
7515 return false;
7516
7517 return true;
7518 }
7519
7520 /* This function requires the caller holds hdev->lock */
7521 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7522 u8 addr_type, u8 auto_connect)
7523 {
7524 struct hci_conn_params *params;
7525
7526 params = hci_conn_params_add(hdev, addr, addr_type);
7527 if (!params)
7528 return -EIO;
7529
7530 if (params->auto_connect == auto_connect)
7531 return 0;
7532
7533 hci_pend_le_list_del_init(params);
7534
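/* Re-file the params on the list matching the new policy: pending or
 * explicit connections go on pend_le_conns, passive-scan reporting
 * goes on pend_le_reports.
 */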
7535 switch (auto_connect) {
7536 case HCI_AUTO_CONN_DISABLED:
7537 case HCI_AUTO_CONN_LINK_LOSS:
7538 /* If auto connect is being disabled when we're trying to
7539 * connect to device, keep connecting.
7540 */
7541 if (params->explicit_connect)
7542 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7543 break;
7544 case HCI_AUTO_CONN_REPORT:
7545 if (params->explicit_connect)
7546 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7547 else
7548 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7549 break;
7550 case HCI_AUTO_CONN_DIRECT:
7551 case HCI_AUTO_CONN_ALWAYS:
7552 if (!is_connected(hdev, addr, addr_type))
7553 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7554 break;
7555 }
7556
7557 params->auto_connect = auto_connect;
7558
7559 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7560 addr, addr_type, auto_connect);
7561
7562 return 0;
7563 }
7564
7565 static void device_added(struct sock *sk, struct hci_dev *hdev,
7566 bdaddr_t *bdaddr, u8 type, u8 action)
7567 {
7568 struct mgmt_ev_device_added ev;
7569
7570 bacpy(&ev.addr.bdaddr, bdaddr);
7571 ev.addr.type = type;
7572 ev.action = action;
7573
7574 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7575 }
7576
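/* The connection parameters were already committed under hdev->lock
 * by add_device(), so the deferred work only needs to re-sync the
 * passive scanning state with the updated lists.
 */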
7577 static int add_device_sync(struct hci_dev *hdev, void *data)
7578 {
7579 return hci_update_passive_scan_sync(hdev);
7580 }
7581
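/* Handler for MGMT_OP_ADD_DEVICE. The action byte selects the policy:
 * 0x00 scan and report only (HCI_AUTO_CONN_REPORT), 0x01 allow
 * incoming connections for BR/EDR or direct connect for LE
 * (HCI_AUTO_CONN_DIRECT), and 0x02 auto-connect whenever seen
 * (HCI_AUTO_CONN_ALWAYS).
 */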
7582 static int add_device(struct sock *sk, struct hci_dev *hdev,
7583 void *data, u16 len)
7584 {
7585 struct mgmt_cp_add_device *cp = data;
7586 u8 auto_conn, addr_type;
7587 struct hci_conn_params *params;
7588 int err;
7589 u32 current_flags = 0;
7590 u32 supported_flags;
7591
7592 bt_dev_dbg(hdev, "sock %p", sk);
7593
7594 if (!bdaddr_type_is_valid(cp->addr.type) ||
7595 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7596 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7597 MGMT_STATUS_INVALID_PARAMS,
7598 &cp->addr, sizeof(cp->addr));
7599
7600 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7601 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7602 MGMT_STATUS_INVALID_PARAMS,
7603 &cp->addr, sizeof(cp->addr));
7604
7605 hci_dev_lock(hdev);
7606
7607 if (cp->addr.type == BDADDR_BREDR) {
7608 /* Only incoming connections action is supported for now */
7609 if (cp->action != 0x01) {
7610 err = mgmt_cmd_complete(sk, hdev->id,
7611 MGMT_OP_ADD_DEVICE,
7612 MGMT_STATUS_INVALID_PARAMS,
7613 &cp->addr, sizeof(cp->addr));
7614 goto unlock;
7615 }
7616
7617 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7618 &cp->addr.bdaddr,
7619 cp->addr.type, 0);
7620 if (err)
7621 goto unlock;
7622
7623 hci_update_scan(hdev);
7624
7625 goto added;
7626 }
7627
7628 addr_type = le_addr_type(cp->addr.type);
7629
7630 if (cp->action == 0x02)
7631 auto_conn = HCI_AUTO_CONN_ALWAYS;
7632 else if (cp->action == 0x01)
7633 auto_conn = HCI_AUTO_CONN_DIRECT;
7634 else
7635 auto_conn = HCI_AUTO_CONN_REPORT;
7636
7637 /* Kernel internally uses conn_params with resolvable private
7638 * address, but Add Device allows only identity addresses.
7639 * Make sure it is enforced before calling
7640 * hci_conn_params_lookup.
7641 */
7642 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7643 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7644 MGMT_STATUS_INVALID_PARAMS,
7645 &cp->addr, sizeof(cp->addr));
7646 goto unlock;
7647 }
7648
7649 /* If the connection parameters don't exist for this device,
7650 * they will be created and configured with defaults.
7651 */
7652 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7653 auto_conn) < 0) {
7654 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7655 MGMT_STATUS_FAILED, &cp->addr,
7656 sizeof(cp->addr));
7657 goto unlock;
7658 } else {
7659 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7660 addr_type);
7661 if (params)
7662 current_flags = params->flags;
7663 }
7664
7665 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7666 if (err < 0)
7667 goto unlock;
7668
7669 added:
7670 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7671 supported_flags = hdev->conn_flags;
7672 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7673 supported_flags, current_flags);
7674
7675 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7676 MGMT_STATUS_SUCCESS, &cp->addr,
7677 sizeof(cp->addr));
7678
7679 unlock:
7680 hci_dev_unlock(hdev);
7681 return err;
7682 }
7683
7684 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7685 bdaddr_t *bdaddr, u8 type)
7686 {
7687 struct mgmt_ev_device_removed ev;
7688
7689 bacpy(&ev.addr.bdaddr, bdaddr);
7690 ev.addr.type = type;
7691
7692 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7693 }
7694
7695 static int remove_device_sync(struct hci_dev *hdev, void *data)
7696 {
7697 return hci_update_passive_scan_sync(hdev);
7698 }
7699
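/* Handler for MGMT_OP_REMOVE_DEVICE. A specific address removes that
 * entry from the accept list (BR/EDR) or frees its connection
 * parameters (LE); BDADDR_ANY with address type 0 clears the whole
 * accept list and all non-disabled LE connection parameters.
 */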
7700 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7701 void *data, u16 len)
7702 {
7703 struct mgmt_cp_remove_device *cp = data;
7704 int err;
7705
7706 bt_dev_dbg(hdev, "sock %p", sk);
7707
7708 hci_dev_lock(hdev);
7709
7710 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7711 struct hci_conn_params *params;
7712 u8 addr_type;
7713
7714 if (!bdaddr_type_is_valid(cp->addr.type)) {
7715 err = mgmt_cmd_complete(sk, hdev->id,
7716 MGMT_OP_REMOVE_DEVICE,
7717 MGMT_STATUS_INVALID_PARAMS,
7718 &cp->addr, sizeof(cp->addr));
7719 goto unlock;
7720 }
7721
7722 if (cp->addr.type == BDADDR_BREDR) {
7723 err = hci_bdaddr_list_del(&hdev->accept_list,
7724 &cp->addr.bdaddr,
7725 cp->addr.type);
7726 if (err) {
7727 err = mgmt_cmd_complete(sk, hdev->id,
7728 MGMT_OP_REMOVE_DEVICE,
7729 MGMT_STATUS_INVALID_PARAMS,
7730 &cp->addr,
7731 sizeof(cp->addr));
7732 goto unlock;
7733 }
7734
7735 hci_update_scan(hdev);
7736
7737 device_removed(sk, hdev, &cp->addr.bdaddr,
7738 cp->addr.type);
7739 goto complete;
7740 }
7741
7742 addr_type = le_addr_type(cp->addr.type);
7743
7744 /* Kernel internally uses conn_params with resolvable private
7745 * address, but Remove Device allows only identity addresses.
7746 * Make sure it is enforced before calling
7747 * hci_conn_params_lookup.
7748 */
7749 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7750 err = mgmt_cmd_complete(sk, hdev->id,
7751 MGMT_OP_REMOVE_DEVICE,
7752 MGMT_STATUS_INVALID_PARAMS,
7753 &cp->addr, sizeof(cp->addr));
7754 goto unlock;
7755 }
7756
7757 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7758 addr_type);
7759 if (!params) {
7760 err = mgmt_cmd_complete(sk, hdev->id,
7761 MGMT_OP_REMOVE_DEVICE,
7762 MGMT_STATUS_INVALID_PARAMS,
7763 &cp->addr, sizeof(cp->addr));
7764 goto unlock;
7765 }
7766
7767 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7768 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7769 err = mgmt_cmd_complete(sk, hdev->id,
7770 MGMT_OP_REMOVE_DEVICE,
7771 MGMT_STATUS_INVALID_PARAMS,
7772 &cp->addr, sizeof(cp->addr));
7773 goto unlock;
7774 }
7775
7776 hci_conn_params_free(params);
7777
7778 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7779 } else {
7780 struct hci_conn_params *p, *tmp;
7781 struct bdaddr_list *b, *btmp;
7782
7783 if (cp->addr.type) {
7784 err = mgmt_cmd_complete(sk, hdev->id,
7785 MGMT_OP_REMOVE_DEVICE,
7786 MGMT_STATUS_INVALID_PARAMS,
7787 &cp->addr, sizeof(cp->addr));
7788 goto unlock;
7789 }
7790
7791 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7792 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7793 list_del(&b->list);
7794 kfree(b);
7795 }
7796
7797 hci_update_scan(hdev);
7798
7799 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7800 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7801 continue;
7802 device_removed(sk, hdev, &p->addr, p->addr_type);
7803 if (p->explicit_connect) {
7804 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7805 continue;
7806 }
7807 hci_conn_params_free(p);
7808 }
7809
7810 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7811 }
7812
7813 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7814
7815 complete:
7816 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7817 MGMT_STATUS_SUCCESS, &cp->addr,
7818 sizeof(cp->addr));
7819 unlock:
7820 hci_dev_unlock(hdev);
7821 return err;
7822 }
7823
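/* Handler for MGMT_OP_LOAD_CONN_PARAM. The variable-length payload is
 * validated against param_count before use, and entries with an
 * invalid address type or out-of-range interval/latency/timeout are
 * skipped individually instead of failing the whole load.
 */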
7824 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7825 u16 len)
7826 {
7827 struct mgmt_cp_load_conn_param *cp = data;
7828 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7829 sizeof(struct mgmt_conn_param));
7830 u16 param_count, expected_len;
7831 int i;
7832
7833 if (!lmp_le_capable(hdev))
7834 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7835 MGMT_STATUS_NOT_SUPPORTED);
7836
7837 param_count = __le16_to_cpu(cp->param_count);
7838 if (param_count > max_param_count) {
7839 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7840 param_count);
7841 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7842 MGMT_STATUS_INVALID_PARAMS);
7843 }
7844
7845 expected_len = struct_size(cp, params, param_count);
7846 if (expected_len != len) {
7847 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7848 expected_len, len);
7849 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7850 MGMT_STATUS_INVALID_PARAMS);
7851 }
7852
7853 bt_dev_dbg(hdev, "param_count %u", param_count);
7854
7855 hci_dev_lock(hdev);
7856
7857 hci_conn_params_clear_disabled(hdev);
7858
7859 for (i = 0; i < param_count; i++) {
7860 struct mgmt_conn_param *param = &cp->params[i];
7861 struct hci_conn_params *hci_param;
7862 u16 min, max, latency, timeout;
7863 u8 addr_type;
7864
7865 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7866 param->addr.type);
7867
7868 if (param->addr.type == BDADDR_LE_PUBLIC) {
7869 addr_type = ADDR_LE_DEV_PUBLIC;
7870 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7871 addr_type = ADDR_LE_DEV_RANDOM;
7872 } else {
7873 bt_dev_err(hdev, "ignoring invalid connection parameters");
7874 continue;
7875 }
7876
7877 min = le16_to_cpu(param->min_interval);
7878 max = le16_to_cpu(param->max_interval);
7879 latency = le16_to_cpu(param->latency);
7880 timeout = le16_to_cpu(param->timeout);
7881
7882 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7883 min, max, latency, timeout);
7884
7885 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7886 bt_dev_err(hdev, "ignoring invalid connection parameters");
7887 continue;
7888 }
7889
7890 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7891 addr_type);
7892 if (!hci_param) {
7893 bt_dev_err(hdev, "failed to add connection parameters");
7894 continue;
7895 }
7896
7897 hci_param->conn_min_interval = min;
7898 hci_param->conn_max_interval = max;
7899 hci_param->conn_latency = latency;
7900 hci_param->supervision_timeout = timeout;
7901 }
7902
7903 hci_dev_unlock(hdev);
7904
7905 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7906 NULL, 0);
7907 }
7908
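/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG. Only permitted while the
 * adapter is powered off and only on controllers declaring
 * HCI_QUIRK_EXTERNAL_CONFIG; toggling the option may move the index
 * between the configured and unconfigured state.
 */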
7909 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7910 void *data, u16 len)
7911 {
7912 struct mgmt_cp_set_external_config *cp = data;
7913 bool changed;
7914 int err;
7915
7916 bt_dev_dbg(hdev, "sock %p", sk);
7917
7918 if (hdev_is_powered(hdev))
7919 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7920 MGMT_STATUS_REJECTED);
7921
7922 if (cp->config != 0x00 && cp->config != 0x01)
7923 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7924 MGMT_STATUS_INVALID_PARAMS);
7925
7926 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7927 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7928 MGMT_STATUS_NOT_SUPPORTED);
7929
7930 hci_dev_lock(hdev);
7931
7932 if (cp->config)
7933 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7934 else
7935 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7936
7937 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7938 if (err < 0)
7939 goto unlock;
7940
7941 if (!changed)
7942 goto unlock;
7943
7944 err = new_options(hdev, sk);
7945
7946 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7947 mgmt_index_removed(hdev);
7948
7949 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7950 hci_dev_set_flag(hdev, HCI_CONFIG);
7951 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7952
7953 queue_work(hdev->req_workqueue, &hdev->power_on);
7954 } else {
7955 set_bit(HCI_RAW, &hdev->flags);
7956 mgmt_index_added(hdev);
7957 }
7958 }
7959
7960 unlock:
7961 hci_dev_unlock(hdev);
7962 return err;
7963 }
7964
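/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS. Requires a powered-off
 * adapter whose driver provides a set_bdaddr callback; once a valid
 * address makes the device fully configured, it is powered on for
 * configuration under HCI_CONFIG.
 */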
7965 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7966 void *data, u16 len)
7967 {
7968 struct mgmt_cp_set_public_address *cp = data;
7969 bool changed;
7970 int err;
7971
7972 bt_dev_dbg(hdev, "sock %p", sk);
7973
7974 if (hdev_is_powered(hdev))
7975 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7976 MGMT_STATUS_REJECTED);
7977
7978 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7979 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7980 MGMT_STATUS_INVALID_PARAMS);
7981
7982 if (!hdev->set_bdaddr)
7983 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7984 MGMT_STATUS_NOT_SUPPORTED);
7985
7986 hci_dev_lock(hdev);
7987
7988 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7989 bacpy(&hdev->public_addr, &cp->bdaddr);
7990
7991 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7992 if (err < 0)
7993 goto unlock;
7994
7995 if (!changed)
7996 goto unlock;
7997
7998 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7999 err = new_options(hdev, sk);
8000
8001 if (is_configured(hdev)) {
8002 mgmt_index_removed(hdev);
8003
8004 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8005
8006 hci_dev_set_flag(hdev, HCI_CONFIG);
8007 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8008
8009 queue_work(hdev->req_workqueue, &hdev->power_on);
8010 }
8011
8012 unlock:
8013 hci_dev_unlock(hdev);
8014 return err;
8015 }
8016
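/* Completion handler for the deferred local OOB read. Depending on
 * whether BR/EDR Secure Connections is enabled, the reply carries
 * either just the P-192 hash/randomizer pair or the P-256 pair as
 * well (P-192 being omitted in SC-only mode), packed as EIR fields.
 */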
8017 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8018 int err)
8019 {
8020 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8021 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8022 u8 *h192, *r192, *h256, *r256;
8023 struct mgmt_pending_cmd *cmd = data;
8024 struct sk_buff *skb = cmd->skb;
8025 u8 status = mgmt_status(err);
8026 u16 eir_len;
8027
8028 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8029 return;
8030
8031 if (!status) {
8032 if (!skb)
8033 status = MGMT_STATUS_FAILED;
8034 else if (IS_ERR(skb))
8035 status = mgmt_status(PTR_ERR(skb));
8036 else
8037 status = mgmt_status(skb->data[0]);
8038 }
8039
8040 bt_dev_dbg(hdev, "status %u", status);
8041
8042 mgmt_cp = cmd->param;
8043
8044 if (status) {
8045 status = mgmt_status(status);
8046 eir_len = 0;
8047
8048 h192 = NULL;
8049 r192 = NULL;
8050 h256 = NULL;
8051 r256 = NULL;
8052 } else if (!bredr_sc_enabled(hdev)) {
8053 struct hci_rp_read_local_oob_data *rp;
8054
8055 if (skb->len != sizeof(*rp)) {
8056 status = MGMT_STATUS_FAILED;
8057 eir_len = 0;
8058 } else {
8059 status = MGMT_STATUS_SUCCESS;
8060 rp = (void *)skb->data;
8061
8062 eir_len = 5 + 18 + 18;
8063 h192 = rp->hash;
8064 r192 = rp->rand;
8065 h256 = NULL;
8066 r256 = NULL;
8067 }
8068 } else {
8069 struct hci_rp_read_local_oob_ext_data *rp;
8070
8071 if (skb->len != sizeof(*rp)) {
8072 status = MGMT_STATUS_FAILED;
8073 eir_len = 0;
8074 } else {
8075 status = MGMT_STATUS_SUCCESS;
8076 rp = (void *)skb->data;
8077
8078 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8079 eir_len = 5 + 18 + 18;
8080 h192 = NULL;
8081 r192 = NULL;
8082 } else {
8083 eir_len = 5 + 18 + 18 + 18 + 18;
8084 h192 = rp->hash192;
8085 r192 = rp->rand192;
8086 }
8087
8088 h256 = rp->hash256;
8089 r256 = rp->rand256;
8090 }
8091 }
8092
8093 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8094 if (!mgmt_rp)
8095 goto done;
8096
8097 if (eir_len == 0)
8098 goto send_rsp;
8099
8100 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8101 hdev->dev_class, 3);
8102
8103 if (h192 && r192) {
8104 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8105 EIR_SSP_HASH_C192, h192, 16);
8106 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8107 EIR_SSP_RAND_R192, r192, 16);
8108 }
8109
8110 if (h256 && r256) {
8111 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8112 EIR_SSP_HASH_C256, h256, 16);
8113 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8114 EIR_SSP_RAND_R256, r256, 16);
8115 }
8116
8117 send_rsp:
8118 mgmt_rp->type = mgmt_cp->type;
8119 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8120
8121 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8122 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8123 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8124 if (err < 0 || status)
8125 goto done;
8126
8127 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8128
8129 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8130 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8131 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8132 done:
8133 if (skb && !IS_ERR(skb))
8134 kfree_skb(skb);
8135
8136 kfree(mgmt_rp);
8137 mgmt_pending_remove(cmd);
8138 }
8139
8140 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8141 struct mgmt_cp_read_local_oob_ext_data *cp)
8142 {
8143 struct mgmt_pending_cmd *cmd;
8144 int err;
8145
8146 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8147 cp, sizeof(*cp));
8148 if (!cmd)
8149 return -ENOMEM;
8150
8151 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8152 read_local_oob_ext_data_complete);
8153
8154 if (err < 0) {
8155 mgmt_pending_remove(cmd);
8156 return err;
8157 }
8158
8159 return 0;
8160 }
8161
8162 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8163 void *data, u16 data_len)
8164 {
8165 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8166 struct mgmt_rp_read_local_oob_ext_data *rp;
8167 size_t rp_len;
8168 u16 eir_len;
8169 u8 status, flags, role, addr[7], hash[16], rand[16];
8170 int err;
8171
8172 bt_dev_dbg(hdev, "sock %p", sk);
8173
8174 if (hdev_is_powered(hdev)) {
8175 switch (cp->type) {
8176 case BIT(BDADDR_BREDR):
8177 status = mgmt_bredr_support(hdev);
8178 if (status)
8179 eir_len = 0;
8180 else
8181 eir_len = 5;
8182 break;
8183 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8184 status = mgmt_le_support(hdev);
8185 if (status)
8186 eir_len = 0;
8187 else
8188 eir_len = 9 + 3 + 18 + 18 + 3;
8189 break;
8190 default:
8191 status = MGMT_STATUS_INVALID_PARAMS;
8192 eir_len = 0;
8193 break;
8194 }
8195 } else {
8196 status = MGMT_STATUS_NOT_POWERED;
8197 eir_len = 0;
8198 }
8199
8200 rp_len = sizeof(*rp) + eir_len;
8201 rp = kmalloc(rp_len, GFP_ATOMIC);
8202 if (!rp)
8203 return -ENOMEM;
8204
8205 if (!status && !lmp_ssp_capable(hdev)) {
8206 status = MGMT_STATUS_NOT_SUPPORTED;
8207 eir_len = 0;
8208 }
8209
8210 if (status)
8211 goto complete;
8212
8213 hci_dev_lock(hdev);
8214
8215 eir_len = 0;
8216 switch (cp->type) {
8217 case BIT(BDADDR_BREDR):
8218 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8219 err = read_local_ssp_oob_req(hdev, sk, cp);
8220 hci_dev_unlock(hdev);
8221 if (!err)
8222 goto done;
8223
8224 status = MGMT_STATUS_FAILED;
8225 goto complete;
8226 } else {
8227 eir_len = eir_append_data(rp->eir, eir_len,
8228 EIR_CLASS_OF_DEV,
8229 hdev->dev_class, 3);
8230 }
8231 break;
8232 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8233 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8234 smp_generate_oob(hdev, hash, rand) < 0) {
8235 hci_dev_unlock(hdev);
8236 status = MGMT_STATUS_FAILED;
8237 goto complete;
8238 }
8239
8240 /* This should return the active RPA, but since the RPA
8241 * is only programmed on demand, it is really hard to fill
8242 * this in at the moment. For now disallow retrieving
8243 * local out-of-band data when privacy is in use.
8244 *
8245 * Returning the identity address will not help here since
8246 * pairing happens before the identity resolving key is
8247 * known and thus the connection establishment happens
8248 * based on the RPA and not the identity address.
8249 */
8250 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8251 hci_dev_unlock(hdev);
8252 status = MGMT_STATUS_REJECTED;
8253 goto complete;
8254 }
8255
8256 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8257 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8258 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8259 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8260 memcpy(addr, &hdev->static_addr, 6);
8261 addr[6] = 0x01;
8262 } else {
8263 memcpy(addr, &hdev->bdaddr, 6);
8264 addr[6] = 0x00;
8265 }
8266
8267 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8268 addr, sizeof(addr));
8269
8270 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8271 role = 0x02;
8272 else
8273 role = 0x01;
8274
8275 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8276 &role, sizeof(role));
8277
8278 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8279 eir_len = eir_append_data(rp->eir, eir_len,
8280 EIR_LE_SC_CONFIRM,
8281 hash, sizeof(hash));
8282
8283 eir_len = eir_append_data(rp->eir, eir_len,
8284 EIR_LE_SC_RANDOM,
8285 rand, sizeof(rand));
8286 }
8287
8288 flags = mgmt_get_adv_discov_flags(hdev);
8289
8290 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8291 flags |= LE_AD_NO_BREDR;
8292
8293 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8294 &flags, sizeof(flags));
8295 break;
8296 }
8297
8298 hci_dev_unlock(hdev);
8299
8300 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8301
8302 status = MGMT_STATUS_SUCCESS;
8303
8304 complete:
8305 rp->type = cp->type;
8306 rp->eir_len = cpu_to_le16(eir_len);
8307
8308 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8309 status, rp, sizeof(*rp) + eir_len);
8310 if (err < 0 || status)
8311 goto done;
8312
8313 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8314 rp, sizeof(*rp) + eir_len,
8315 HCI_MGMT_OOB_DATA_EVENTS, sk);
8316
8317 done:
8318 kfree(rp);
8319
8320 return err;
8321 }
8322
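/* Build the MGMT_ADV_FLAG_* mask exposed to userspace. The base flags
 * are always offered; TX power and the secondary PHY bits additionally
 * depend on controller support for extended advertising.
 */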
8323 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8324 {
8325 u32 flags = 0;
8326
8327 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8328 flags |= MGMT_ADV_FLAG_DISCOV;
8329 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8330 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8331 flags |= MGMT_ADV_FLAG_APPEARANCE;
8332 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8333 flags |= MGMT_ADV_PARAM_DURATION;
8334 flags |= MGMT_ADV_PARAM_TIMEOUT;
8335 flags |= MGMT_ADV_PARAM_INTERVALS;
8336 flags |= MGMT_ADV_PARAM_TX_POWER;
8337 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8338
8339 /* With extended advertising, the TX_POWER returned from Set Adv Param
8340 * will always be valid.
8341 */
8342 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8343 flags |= MGMT_ADV_FLAG_TX_POWER;
8344
8345 if (ext_adv_capable(hdev)) {
8346 flags |= MGMT_ADV_FLAG_SEC_1M;
8347 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8348 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8349
8350 if (le_2m_capable(hdev))
8351 flags |= MGMT_ADV_FLAG_SEC_2M;
8352
8353 if (le_coded_capable(hdev))
8354 flags |= MGMT_ADV_FLAG_SEC_CODED;
8355 }
8356
8357 return flags;
8358 }
8359
8360 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8361 void *data, u16 data_len)
8362 {
8363 struct mgmt_rp_read_adv_features *rp;
8364 size_t rp_len;
8365 int err;
8366 struct adv_info *adv_instance;
8367 u32 supported_flags;
8368 u8 *instance;
8369
8370 bt_dev_dbg(hdev, "sock %p", sk);
8371
8372 if (!lmp_le_capable(hdev))
8373 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8374 MGMT_STATUS_REJECTED);
8375
8376 hci_dev_lock(hdev);
8377
8378 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8379 rp = kmalloc(rp_len, GFP_ATOMIC);
8380 if (!rp) {
8381 hci_dev_unlock(hdev);
8382 return -ENOMEM;
8383 }
8384
8385 supported_flags = get_supported_adv_flags(hdev);
8386
8387 rp->supported_flags = cpu_to_le32(supported_flags);
8388 rp->max_adv_data_len = max_adv_len(hdev);
8389 rp->max_scan_rsp_len = max_adv_len(hdev);
8390 rp->max_instances = hdev->le_num_of_adv_sets;
8391 rp->num_instances = hdev->adv_instance_cnt;
8392
8393 instance = rp->instance;
8394 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8395 /* Only instances 1-le_num_of_adv_sets are externally visible */
8396 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8397 *instance = adv_instance->instance;
8398 instance++;
8399 } else {
8400 rp->num_instances--;
8401 rp_len--;
8402 }
8403 }
8404
8405 hci_dev_unlock(hdev);
8406
8407 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8408 MGMT_STATUS_SUCCESS, rp, rp_len);
8409
8410 kfree(rp);
8411
8412 return err;
8413 }
8414
8415 static u8 calculate_name_len(struct hci_dev *hdev)
8416 {
8417 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8418
8419 return eir_append_local_name(hdev, buf, 0);
8420 }
8421
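/* Compute how many bytes of the advertising payload remain for
 * user-supplied TLVs once kernel-managed fields are reserved: 3 bytes
 * for the Flags field, 3 for TX power, len + 2 for the local name and
 * 4 for the appearance entry.
 */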
8422 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8423 bool is_adv_data)
8424 {
8425 u8 max_len = max_adv_len(hdev);
8426
8427 if (is_adv_data) {
8428 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8429 MGMT_ADV_FLAG_LIMITED_DISCOV |
8430 MGMT_ADV_FLAG_MANAGED_FLAGS))
8431 max_len -= 3;
8432
8433 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8434 max_len -= 3;
8435 } else {
8436 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8437 max_len -= calculate_name_len(hdev);
8438
8439 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8440 max_len -= 4;
8441 }
8442
8443 return max_len;
8444 }
8445
8446 static bool flags_managed(u32 adv_flags)
8447 {
8448 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8449 MGMT_ADV_FLAG_LIMITED_DISCOV |
8450 MGMT_ADV_FLAG_MANAGED_FLAGS);
8451 }
8452
8453 static bool tx_power_managed(u32 adv_flags)
8454 {
8455 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8456 }
8457
8458 static bool name_managed(u32 adv_flags)
8459 {
8460 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8461 }
8462
8463 static bool appearance_managed(u32 adv_flags)
8464 {
8465 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8466 }
8467
8468 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8469 u8 len, bool is_adv_data)
8470 {
8471 int i, cur_len;
8472 u8 max_len;
8473
8474 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8475
8476 if (len > max_len)
8477 return false;
8478
8479 /* Make sure that the data is correctly formatted. */
8480 for (i = 0; i < len; i += (cur_len + 1)) {
8481 cur_len = data[i];
8482
8483 if (!cur_len)
8484 continue;
8485
8486 if (data[i + 1] == EIR_FLAGS &&
8487 (!is_adv_data || flags_managed(adv_flags)))
8488 return false;
8489
8490 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8491 return false;
8492
8493 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8494 return false;
8495
8496 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8497 return false;
8498
8499 if (data[i + 1] == EIR_APPEARANCE &&
8500 appearance_managed(adv_flags))
8501 return false;
8502
8503 /* If the current field length would exceed the total data
8504 * length, then it's invalid.
8505 */
8506 if (i + cur_len >= len)
8507 return false;
8508 }
8509
8510 return true;
8511 }
8512
8513 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8514 {
8515 u32 supported_flags, phy_flags;
8516
8517 /* The current implementation only supports a subset of the specified
8518 * flags. Also need to check mutual exclusiveness of sec flags.
8519 */
8520 supported_flags = get_supported_adv_flags(hdev);
8521 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
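/* phy_flags & -phy_flags isolates the lowest set bit, so the
 * expression below is non-zero whenever more than one
 * MGMT_ADV_FLAG_SEC_* PHY has been requested at once.
 */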
8522 if (adv_flags & ~supported_flags ||
8523 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8524 return false;
8525
8526 return true;
8527 }
8528
8529 static bool adv_busy(struct hci_dev *hdev)
8530 {
8531 return pending_find(MGMT_OP_SET_LE, hdev);
8532 }
8533
8534 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8535 int err)
8536 {
8537 struct adv_info *adv, *n;
8538
8539 bt_dev_dbg(hdev, "err %d", err);
8540
8541 hci_dev_lock(hdev);
8542
8543 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8544 u8 instance;
8545
8546 if (!adv->pending)
8547 continue;
8548
8549 if (!err) {
8550 adv->pending = false;
8551 continue;
8552 }
8553
8554 instance = adv->instance;
8555
8556 if (hdev->cur_adv_instance == instance)
8557 cancel_adv_timeout(hdev);
8558
8559 hci_remove_adv_instance(hdev, instance);
8560 mgmt_advertising_removed(sk, hdev, instance);
8561 }
8562
8563 hci_dev_unlock(hdev);
8564 }
8565
8566 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8567 {
8568 struct mgmt_pending_cmd *cmd = data;
8569 struct mgmt_cp_add_advertising *cp = cmd->param;
8570 struct mgmt_rp_add_advertising rp;
8571
8572 memset(&rp, 0, sizeof(rp));
8573
8574 rp.instance = cp->instance;
8575
8576 if (err)
8577 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8578 mgmt_status(err));
8579 else
8580 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8581 mgmt_status(err), &rp, sizeof(rp));
8582
8583 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8584
8585 mgmt_pending_free(cmd);
8586 }
8587
8588 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8589 {
8590 struct mgmt_pending_cmd *cmd = data;
8591 struct mgmt_cp_add_advertising *cp = cmd->param;
8592
8593 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8594 }
8595
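/* Handler for MGMT_OP_ADD_ADVERTISING. Validates the requested flags
 * and TLV payloads, registers the instance, and queues
 * add_advertising_sync() unless HCI_ADVERTISING is set, the adapter
 * is unpowered or no instance needs scheduling, in which case the
 * command completes immediately.
 */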
8596 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8597 void *data, u16 data_len)
8598 {
8599 struct mgmt_cp_add_advertising *cp = data;
8600 struct mgmt_rp_add_advertising rp;
8601 u32 flags;
8602 u8 status;
8603 u16 timeout, duration;
8604 unsigned int prev_instance_cnt;
8605 u8 schedule_instance = 0;
8606 struct adv_info *adv, *next_instance;
8607 int err;
8608 struct mgmt_pending_cmd *cmd;
8609
8610 bt_dev_dbg(hdev, "sock %p", sk);
8611
8612 status = mgmt_le_support(hdev);
8613 if (status)
8614 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8615 status);
8616
8617 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8618 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8619 MGMT_STATUS_INVALID_PARAMS);
8620
8621 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8622 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8623 MGMT_STATUS_INVALID_PARAMS);
8624
8625 flags = __le32_to_cpu(cp->flags);
8626 timeout = __le16_to_cpu(cp->timeout);
8627 duration = __le16_to_cpu(cp->duration);
8628
8629 if (!requested_adv_flags_are_valid(hdev, flags))
8630 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8631 MGMT_STATUS_INVALID_PARAMS);
8632
8633 hci_dev_lock(hdev);
8634
8635 if (timeout && !hdev_is_powered(hdev)) {
8636 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8637 MGMT_STATUS_REJECTED);
8638 goto unlock;
8639 }
8640
8641 if (adv_busy(hdev)) {
8642 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8643 MGMT_STATUS_BUSY);
8644 goto unlock;
8645 }
8646
8647 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8648 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8649 cp->scan_rsp_len, false)) {
8650 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8651 MGMT_STATUS_INVALID_PARAMS);
8652 goto unlock;
8653 }
8654
8655 prev_instance_cnt = hdev->adv_instance_cnt;
8656
8657 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8658 cp->adv_data_len, cp->data,
8659 cp->scan_rsp_len,
8660 cp->data + cp->adv_data_len,
8661 timeout, duration,
8662 HCI_ADV_TX_POWER_NO_PREFERENCE,
8663 hdev->le_adv_min_interval,
8664 hdev->le_adv_max_interval, 0);
8665 if (IS_ERR(adv)) {
8666 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8667 MGMT_STATUS_FAILED);
8668 goto unlock;
8669 }
8670
8671 /* Only trigger an advertising added event if a new instance was
8672 * actually added.
8673 */
8674 if (hdev->adv_instance_cnt > prev_instance_cnt)
8675 mgmt_advertising_added(sk, hdev, cp->instance);
8676
8677 if (hdev->cur_adv_instance == cp->instance) {
8678 /* If the currently advertised instance is being changed then
8679 * cancel the current advertising and schedule the next
8680 * instance. If there is only one instance then the overridden
8681 * advertising data will be visible right away.
8682 */
8683 cancel_adv_timeout(hdev);
8684
8685 next_instance = hci_get_next_instance(hdev, cp->instance);
8686 if (next_instance)
8687 schedule_instance = next_instance->instance;
8688 } else if (!hdev->adv_instance_timeout) {
8689 /* Immediately advertise the new instance if no other
8690 * instance is currently being advertised.
8691 */
8692 schedule_instance = cp->instance;
8693 }
8694
8695 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8696 * there is no instance to be advertised then we have no HCI
8697 * communication to make. Simply return.
8698 */
8699 if (!hdev_is_powered(hdev) ||
8700 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8701 !schedule_instance) {
8702 rp.instance = cp->instance;
8703 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8704 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8705 goto unlock;
8706 }
8707
8708 /* We're good to go, update advertising data, parameters, and start
8709 * advertising.
8710 */
8711 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8712 data_len);
8713 if (!cmd) {
8714 err = -ENOMEM;
8715 goto unlock;
8716 }
8717
8718 cp->instance = schedule_instance;
8719
8720 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8721 add_advertising_complete);
8722 if (err < 0)
8723 mgmt_pending_free(cmd);
8724
8725 unlock:
8726 hci_dev_unlock(hdev);
8727
8728 return err;
8729 }
8730
8731 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8732 int err)
8733 {
8734 struct mgmt_pending_cmd *cmd = data;
8735 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8736 struct mgmt_rp_add_ext_adv_params rp;
8737 struct adv_info *adv;
8738 u32 flags;
8739
8740 BT_DBG("%s", hdev->name);
8741
8742 hci_dev_lock(hdev);
8743
8744 adv = hci_find_adv_instance(hdev, cp->instance);
8745 if (!adv)
8746 goto unlock;
8747
8748 rp.instance = cp->instance;
8749 rp.tx_power = adv->tx_power;
8750
8751 /* While we're at it, inform userspace of the available space for this
8752 * advertisement, given the flags that will be used.
8753 */
8754 flags = __le32_to_cpu(cp->flags);
8755 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8756 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8757
8758 if (err) {
8759 /* If this advertisement was previously advertising and we
8760 * failed to update it, we signal that it has been removed and
8761 * delete its structure
8762 */
8763 if (!adv->pending)
8764 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8765
8766 hci_remove_adv_instance(hdev, cp->instance);
8767
8768 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8769 mgmt_status(err));
8770 } else {
8771 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8772 mgmt_status(err), &rp, sizeof(rp));
8773 }
8774
8775 unlock:
8776 if (cmd)
8777 mgmt_pending_free(cmd);
8778
8779 hci_dev_unlock(hdev);
8780 }
8781
8782 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8783 {
8784 struct mgmt_pending_cmd *cmd = data;
8785 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8786
8787 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8788 }
8789
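/* Handler for MGMT_OP_ADD_EXT_ADV_PARAMS, the first half of the
 * two-stage extended advertising registration: the instance is
 * created here with parameters but no data, which a subsequent Add
 * Extended Advertising Data command supplies.
 */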
8790 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8791 void *data, u16 data_len)
8792 {
8793 struct mgmt_cp_add_ext_adv_params *cp = data;
8794 struct mgmt_rp_add_ext_adv_params rp;
8795 struct mgmt_pending_cmd *cmd = NULL;
8796 struct adv_info *adv;
8797 u32 flags, min_interval, max_interval;
8798 u16 timeout, duration;
8799 u8 status;
8800 s8 tx_power;
8801 int err;
8802
8803 BT_DBG("%s", hdev->name);
8804
8805 status = mgmt_le_support(hdev);
8806 if (status)
8807 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8808 status);
8809
8810 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8811 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8812 MGMT_STATUS_INVALID_PARAMS);
8813
8814 /* The purpose of breaking add_advertising into two separate MGMT calls
8815 * for params and data is to allow more parameters to be added to this
8816 * structure in the future. For this reason, we verify that we have the
8817 * bare minimum structure we know of when the interface was defined. Any
8818 * extra parameters we don't know about will be ignored in this request.
8819 */
8820 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8821 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8822 MGMT_STATUS_INVALID_PARAMS);
8823
8824 flags = __le32_to_cpu(cp->flags);
8825
8826 if (!requested_adv_flags_are_valid(hdev, flags))
8827 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8828 MGMT_STATUS_INVALID_PARAMS);
8829
8830 hci_dev_lock(hdev);
8831
8832 /* In the new interface, we require that we are powered to register */
8833 if (!hdev_is_powered(hdev)) {
8834 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8835 MGMT_STATUS_REJECTED);
8836 goto unlock;
8837 }
8838
8839 if (adv_busy(hdev)) {
8840 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8841 MGMT_STATUS_BUSY);
8842 goto unlock;
8843 }
8844
8845 /* Parse defined parameters from request, use defaults otherwise */
8846 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8847 __le16_to_cpu(cp->timeout) : 0;
8848
8849 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8850 __le16_to_cpu(cp->duration) :
8851 hdev->def_multi_adv_rotation_duration;
8852
8853 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8854 __le32_to_cpu(cp->min_interval) :
8855 hdev->le_adv_min_interval;
8856
8857 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8858 __le32_to_cpu(cp->max_interval) :
8859 hdev->le_adv_max_interval;
8860
8861 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8862 cp->tx_power :
8863 HCI_ADV_TX_POWER_NO_PREFERENCE;
8864
8865 /* Create advertising instance with no advertising or response data */
8866 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8867 timeout, duration, tx_power, min_interval,
8868 max_interval, 0);
8869
8870 if (IS_ERR(adv)) {
8871 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8872 MGMT_STATUS_FAILED);
8873 goto unlock;
8874 }
8875
8876 /* Submit request for advertising params if ext adv available */
8877 if (ext_adv_capable(hdev)) {
8878 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8879 data, data_len);
8880 if (!cmd) {
8881 err = -ENOMEM;
8882 hci_remove_adv_instance(hdev, cp->instance);
8883 goto unlock;
8884 }
8885
8886 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8887 add_ext_adv_params_complete);
8888 if (err < 0)
8889 mgmt_pending_free(cmd);
8890 } else {
8891 rp.instance = cp->instance;
8892 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8893 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8894 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8895 err = mgmt_cmd_complete(sk, hdev->id,
8896 MGMT_OP_ADD_EXT_ADV_PARAMS,
8897 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8898 }
8899
8900 unlock:
8901 hci_dev_unlock(hdev);
8902
8903 return err;
8904 }
8905
8906 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8907 {
8908 struct mgmt_pending_cmd *cmd = data;
8909 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8910 struct mgmt_rp_add_advertising rp;
8911
8912 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8913
8914 memset(&rp, 0, sizeof(rp));
8915
8916 rp.instance = cp->instance;
8917
8918 if (err)
8919 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8920 mgmt_status(err));
8921 else
8922 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8923 mgmt_status(err), &rp, sizeof(rp));
8924
8925 mgmt_pending_free(cmd);
8926 }
8927
8928 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8929 {
8930 struct mgmt_pending_cmd *cmd = data;
8931 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8932 int err;
8933
8934 if (ext_adv_capable(hdev)) {
8935 err = hci_update_adv_data_sync(hdev, cp->instance);
8936 if (err)
8937 return err;
8938
8939 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8940 if (err)
8941 return err;
8942
8943 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8944 }
8945
8946 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8947 }
8948
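/* Handler for MGMT_OP_ADD_EXT_ADV_DATA, the second stage: the
 * instance created by Add Extended Advertising Parameters is filled
 * with advertising and scan response data and then enabled, or
 * scheduled via software rotation on non-extended controllers.
 */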
8949 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8950 u16 data_len)
8951 {
8952 struct mgmt_cp_add_ext_adv_data *cp = data;
8953 struct mgmt_rp_add_ext_adv_data rp;
8954 u8 schedule_instance = 0;
8955 struct adv_info *next_instance;
8956 struct adv_info *adv_instance;
8957 int err = 0;
8958 struct mgmt_pending_cmd *cmd;
8959
8960 BT_DBG("%s", hdev->name);
8961
8962 hci_dev_lock(hdev);
8963
8964 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8965
8966 if (!adv_instance) {
8967 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8968 MGMT_STATUS_INVALID_PARAMS);
8969 goto unlock;
8970 }
8971
8972 /* In the new interface, we require that we are powered to register */
8973 if (!hdev_is_powered(hdev)) {
8974 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8975 MGMT_STATUS_REJECTED);
8976 goto clear_new_instance;
8977 }
8978
8979 if (adv_busy(hdev)) {
8980 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8981 MGMT_STATUS_BUSY);
8982 goto clear_new_instance;
8983 }
8984
8985 /* Validate new data */
8986 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8987 cp->adv_data_len, true) ||
8988 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8989 cp->adv_data_len, cp->scan_rsp_len, false)) {
8990 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8991 MGMT_STATUS_INVALID_PARAMS);
8992 goto clear_new_instance;
8993 }
8994
8995 /* Set the data in the advertising instance */
8996 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
8997 cp->data, cp->scan_rsp_len,
8998 cp->data + cp->adv_data_len);
8999
9000 /* If using software rotation, determine next instance to use */
9001 if (hdev->cur_adv_instance == cp->instance) {
9002 /* If the currently advertised instance is being changed
9003 * then cancel the current advertising and schedule the
9004 * next instance. If there is only one instance then the
9005 * overridden advertising data will be visible right
9006 * away
9007 */
9008 cancel_adv_timeout(hdev);
9009
9010 next_instance = hci_get_next_instance(hdev, cp->instance);
9011 if (next_instance)
9012 schedule_instance = next_instance->instance;
9013 } else if (!hdev->adv_instance_timeout) {
9014 /* Immediately advertise the new instance if no other
9015 * instance is currently being advertised.
9016 */
9017 schedule_instance = cp->instance;
9018 }
9019
9020 /* If the HCI_ADVERTISING flag is set or there is no instance to
9021 * be advertised then we have no HCI communication to make.
9022 * Simply return.
9023 */
9024 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9025 if (adv_instance->pending) {
9026 mgmt_advertising_added(sk, hdev, cp->instance);
9027 adv_instance->pending = false;
9028 }
9029 rp.instance = cp->instance;
9030 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9031 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9032 goto unlock;
9033 }
9034
9035 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9036 data_len);
9037 if (!cmd) {
9038 err = -ENOMEM;
9039 goto clear_new_instance;
9040 }
9041
9042 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9043 add_ext_adv_data_complete);
9044 if (err < 0) {
9045 mgmt_pending_free(cmd);
9046 goto clear_new_instance;
9047 }
9048
9049 /* We were successful in updating data, so trigger advertising_added
9050 * event if this is an instance that wasn't previously advertising. If
9051 * a failure occurs in the requests we initiated, we will remove the
9052 * instance again in add_advertising_complete
9053 */
9054 if (adv_instance->pending)
9055 mgmt_advertising_added(sk, hdev, cp->instance);
9056
9057 goto unlock;
9058
9059 clear_new_instance:
9060 hci_remove_adv_instance(hdev, cp->instance);
9061
9062 unlock:
9063 hci_dev_unlock(hdev);
9064
9065 return err;
9066 }
9067
9068 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9069 int err)
9070 {
9071 struct mgmt_pending_cmd *cmd = data;
9072 struct mgmt_cp_remove_advertising *cp = cmd->param;
9073 struct mgmt_rp_remove_advertising rp;
9074
9075 bt_dev_dbg(hdev, "err %d", err);
9076
9077 memset(&rp, 0, sizeof(rp));
9078 rp.instance = cp->instance;
9079
9080 if (err)
9081 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9082 mgmt_status(err));
9083 else
9084 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9085 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9086
9087 mgmt_pending_free(cmd);
9088 }
9089
9090 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9091 {
9092 struct mgmt_pending_cmd *cmd = data;
9093 struct mgmt_cp_remove_advertising *cp = cmd->param;
9094 int err;
9095
9096 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9097 if (err)
9098 return err;
9099
9100 if (list_empty(&hdev->adv_instances))
9101 err = hci_disable_advertising_sync(hdev);
9102
9103 return err;
9104 }
9105
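/* Handler for MGMT_OP_REMOVE_ADVERTISING. Instance 0 is treated as
 * "all instances"; advertising is disabled entirely once the
 * instance list ends up empty.
 */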
9106 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9107 void *data, u16 data_len)
9108 {
9109 struct mgmt_cp_remove_advertising *cp = data;
9110 struct mgmt_pending_cmd *cmd;
9111 int err;
9112
9113 bt_dev_dbg(hdev, "sock %p", sk);
9114
9115 hci_dev_lock(hdev);
9116
9117 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9118 err = mgmt_cmd_status(sk, hdev->id,
9119 MGMT_OP_REMOVE_ADVERTISING,
9120 MGMT_STATUS_INVALID_PARAMS);
9121 goto unlock;
9122 }
9123
9124 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9125 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9126 MGMT_STATUS_BUSY);
9127 goto unlock;
9128 }
9129
9130 if (list_empty(&hdev->adv_instances)) {
9131 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9132 MGMT_STATUS_INVALID_PARAMS);
9133 goto unlock;
9134 }
9135
9136 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9137 data_len);
9138 if (!cmd) {
9139 err = -ENOMEM;
9140 goto unlock;
9141 }
9142
9143 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9144 remove_advertising_complete);
9145 if (err < 0)
9146 mgmt_pending_free(cmd);
9147
9148 unlock:
9149 hci_dev_unlock(hdev);
9150
9151 return err;
9152 }
9153
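/* Handler for MGMT_OP_GET_ADV_SIZE_INFO. Purely informational: it
 * reports how much advertising and scan response payload the given
 * flags would leave available, without touching controller state.
 */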
9154 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9155 void *data, u16 data_len)
9156 {
9157 struct mgmt_cp_get_adv_size_info *cp = data;
9158 struct mgmt_rp_get_adv_size_info rp;
9159 u32 flags, supported_flags;
9160
9161 bt_dev_dbg(hdev, "sock %p", sk);
9162
9163 if (!lmp_le_capable(hdev))
9164 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9165 MGMT_STATUS_REJECTED);
9166
9167 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9168 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9169 MGMT_STATUS_INVALID_PARAMS);
9170
9171 flags = __le32_to_cpu(cp->flags);
9172
9173 /* The current implementation only supports a subset of the specified
9174 * flags.
9175 */
9176 supported_flags = get_supported_adv_flags(hdev);
9177 if (flags & ~supported_flags)
9178 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9179 MGMT_STATUS_INVALID_PARAMS);
9180
9181 rp.instance = cp->instance;
9182 rp.flags = cp->flags;
9183 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9184 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9185
9186 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9187 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9188 }
9189
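/* Dispatch table for management commands; the array index corresponds
 * to the MGMT_OP_* opcode, which is why entry 0x0000 is empty.
 */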
9190 static const struct hci_mgmt_handler mgmt_handlers[] = {
9191 { NULL }, /* 0x0000 (no command) */
9192 { read_version, MGMT_READ_VERSION_SIZE,
9193 HCI_MGMT_NO_HDEV |
9194 HCI_MGMT_UNTRUSTED },
9195 { read_commands, MGMT_READ_COMMANDS_SIZE,
9196 HCI_MGMT_NO_HDEV |
9197 HCI_MGMT_UNTRUSTED },
9198 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9199 HCI_MGMT_NO_HDEV |
9200 HCI_MGMT_UNTRUSTED },
9201 { read_controller_info, MGMT_READ_INFO_SIZE,
9202 HCI_MGMT_UNTRUSTED },
9203 { set_powered, MGMT_SETTING_SIZE },
9204 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9205 { set_connectable, MGMT_SETTING_SIZE },
9206 { set_fast_connectable, MGMT_SETTING_SIZE },
9207 { set_bondable, MGMT_SETTING_SIZE },
9208 { set_link_security, MGMT_SETTING_SIZE },
9209 { set_ssp, MGMT_SETTING_SIZE },
9210 { set_hs, MGMT_SETTING_SIZE },
9211 { set_le, MGMT_SETTING_SIZE },
9212 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9213 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9214 { add_uuid, MGMT_ADD_UUID_SIZE },
9215 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9216 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9217 HCI_MGMT_VAR_LEN },
9218 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9219 HCI_MGMT_VAR_LEN },
9220 { disconnect, MGMT_DISCONNECT_SIZE },
9221 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9222 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9223 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9224 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9225 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9226 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9227 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9228 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9229 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9230 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9231 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9232 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9233 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9234 HCI_MGMT_VAR_LEN },
9235 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9236 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9237 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9238 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9239 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9240 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9241 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9242 { set_advertising, MGMT_SETTING_SIZE },
9243 { set_bredr, MGMT_SETTING_SIZE },
9244 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9245 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9246 { set_secure_conn, MGMT_SETTING_SIZE },
9247 { set_debug_keys, MGMT_SETTING_SIZE },
9248 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9249 { load_irks, MGMT_LOAD_IRKS_SIZE,
9250 HCI_MGMT_VAR_LEN },
9251 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9252 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9253 { add_device, MGMT_ADD_DEVICE_SIZE },
9254 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9255 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9256 HCI_MGMT_VAR_LEN },
9257 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9258 HCI_MGMT_NO_HDEV |
9259 HCI_MGMT_UNTRUSTED },
9260 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9261 HCI_MGMT_UNCONFIGURED |
9262 HCI_MGMT_UNTRUSTED },
9263 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9264 HCI_MGMT_UNCONFIGURED },
9265 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9266 HCI_MGMT_UNCONFIGURED },
9267 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9268 HCI_MGMT_VAR_LEN },
9269 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9270 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9271 HCI_MGMT_NO_HDEV |
9272 HCI_MGMT_UNTRUSTED },
9273 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9274 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9275 HCI_MGMT_VAR_LEN },
9276 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9277 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9278 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9279 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9280 HCI_MGMT_UNTRUSTED },
9281 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9282 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9283 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9284 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9285 HCI_MGMT_VAR_LEN },
9286 { set_wideband_speech, MGMT_SETTING_SIZE },
9287 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
9288 HCI_MGMT_UNTRUSTED },
9289 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9290 HCI_MGMT_UNTRUSTED |
9291 HCI_MGMT_HDEV_OPTIONAL },
9292 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9293 HCI_MGMT_VAR_LEN |
9294 HCI_MGMT_HDEV_OPTIONAL },
9295 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9296 HCI_MGMT_UNTRUSTED },
9297 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9298 HCI_MGMT_VAR_LEN },
9299 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9300 HCI_MGMT_UNTRUSTED },
9301 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9302 HCI_MGMT_VAR_LEN },
9303 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9304 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9305 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9306 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9307 HCI_MGMT_VAR_LEN },
9308 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
9309 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9310 HCI_MGMT_VAR_LEN },
9311 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
9312 HCI_MGMT_VAR_LEN },
9313 { add_adv_patterns_monitor_rssi,
9314 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9315 HCI_MGMT_VAR_LEN },
9316 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
9317 HCI_MGMT_VAR_LEN },
9318 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
9319 { mesh_send, MGMT_MESH_SEND_SIZE,
9320 HCI_MGMT_VAR_LEN },
9321 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
9322 };
9323
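/* Announce a new controller index to userspace, emitting the legacy
 * (unconfigured) index added event plus the extended variant that
 * also carries the bus type.
 */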
9324 void mgmt_index_added(struct hci_dev *hdev)
9325 {
9326 struct mgmt_ev_ext_index ev;
9327
9328 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9329 return;
9330
9331 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9332 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9333 HCI_MGMT_UNCONF_INDEX_EVENTS);
9334 ev.type = 0x01;
9335 } else {
9336 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9337 HCI_MGMT_INDEX_EVENTS);
9338 ev.type = 0x00;
9339 }
9340
9341 ev.bus = hdev->bus;
9342
9343 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9344 HCI_MGMT_EXT_INDEX_EVENTS);
9345 }
9346
9347 void mgmt_index_removed(struct hci_dev *hdev)
9348 {
9349 struct mgmt_ev_ext_index ev;
9350 struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
9351
9352 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9353 return;
9354
9355 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);
9356
9357 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9358 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9359 HCI_MGMT_UNCONF_INDEX_EVENTS);
9360 ev.type = 0x01;
9361 } else {
9362 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9363 HCI_MGMT_INDEX_EVENTS);
9364 ev.type = 0x00;
9365 }
9366
9367 ev.bus = hdev->bus;
9368
9369 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9370 HCI_MGMT_EXT_INDEX_EVENTS);
9371
9372 /* Cancel any remaining timed work */
9373 if (!hci_dev_test_flag(hdev, HCI_MGMT))
9374 return;
9375 cancel_delayed_work_sync(&hdev->discov_off);
9376 cancel_delayed_work_sync(&hdev->service_cache);
9377 cancel_delayed_work_sync(&hdev->rpa_expired);
9378 }
9379
mgmt_power_on(struct hci_dev * hdev,int err)9380 void mgmt_power_on(struct hci_dev *hdev, int err)
9381 {
9382 struct cmd_lookup match = { NULL, hdev };
9383
9384 bt_dev_dbg(hdev, "err %d", err);
9385
9386 hci_dev_lock(hdev);
9387
9388 if (!err) {
9389 restart_le_actions(hdev);
9390 hci_update_passive_scan(hdev);
9391 }
9392
9393 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9394
9395 new_settings(hdev, match.sk);
9396
9397 if (match.sk)
9398 sock_put(match.sk);
9399
9400 hci_dev_unlock(hdev);
9401 }
9402
__mgmt_power_off(struct hci_dev * hdev)9403 void __mgmt_power_off(struct hci_dev *hdev)
9404 {
9405 struct cmd_lookup match = { NULL, hdev };
9406 u8 zero_cod[] = { 0, 0, 0 };
9407
9408 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9409
	/* If the power off is because of hdev unregistration use the
	 * appropriate INVALID_INDEX status. Otherwise use NOT_POWERED.
	 * We cover both scenarios here since later in mgmt_index_removed()
	 * any hci_conn callbacks will have already been triggered,
	 * potentially causing misleading DISCONNECTED status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

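/* Emit a New Link Key event so userspace can persist the BR/EDR link
 * key; store_hint indicates whether the key is expected to be stored
 * persistently.
 */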
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

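/* Map an SMP long term key to the corresponding mgmt LTK type,
 * distinguishing legacy vs P-256 keys and authenticated vs
 * unauthenticated pairing; P-256 debug keys get their own type.
 */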
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * long term keys stored. Their addresses will change the next
	 * time around.
	 *
	 * Only when a remote device provides an identity address should
	 * the long term key be stored. If the remote identity is known,
	 * the long term keys are internally mapped to the identity
	 * address. So allow static random and public addresses here.
	 */
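	/* Static random addresses have the two most significant bits of
	 * the most significant byte (b[5], addresses are stored little
	 * endian) set to 0b11; resolvable (0b01) and non-resolvable
	 * (0b00) private addresses fail the 0xc0 mask check below.
	 */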
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * signature resolving keys stored. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address should
	 * the signature resolving key be stored. So allow static random
	 * and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

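/* Send a Device Connected event. For LE connections the received
 * advertising data is embedded as EIR data; for BR/EDR the remote name
 * and class of device are appended instead.
 */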
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate a buffer for the LE advertising data or the BR/EDR
	 * name and class of device.
	 */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) +
				     (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	/* mgmt_alloc_skb() can fail under memory pressure; drop the
	 * event rather than dereferencing a NULL skb.
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE,
						    name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class,
						    sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

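/* Complete a pending Unpair Device command: notify its socket that the
 * device was unpaired and drop the pending command entry.
 */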
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

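/* Return true if a Set Powered (off) command is pending, i.e. the
 * controller is currently being powered down.
 */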
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

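/* Collect the socket of the first matching pending command, taking a
 * reference so the caller can notify it (or skip it when broadcasting)
 * and release it with sock_put() afterwards.
 */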
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

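/* Helpers for UUID-based service discovery filtering. EIR data is a
 * sequence of [length][type][payload] fields, where length counts the
 * type byte plus payload. 16-bit and 32-bit UUIDs are expanded to
 * 128-bit form against the Bluetooth Base UUID before comparison.
 *
 * Example (illustrative): the bytes 0x03 0x03 0x0d 0x18 encode a
 * Complete List of 16-bit UUIDs (EIR_UUID16_ALL) containing 0x180d
 * (Heart Rate), with the UUID stored little endian.
 */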
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

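/* Restart LE scanning so that devices hidden by strict duplicate
 * filtering are reported again with fresh RSSI values. The restart is
 * skipped when the remaining scan time is shorter than the restart
 * delay, since the scan would expire on its own first.
 */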
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results
	 * with an RSSI smaller than the RSSI threshold will be dropped. If
	 * the quirk is set, let it through for further processing, as we
	 * might need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

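/* Forward an advertising report to mesh listeners, but only if it
 * carries at least one of the AD types the receiver registered
 * interest in (an empty mesh_ad_types list accepts everything).
 */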
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) +
			     (name ? eir_precalc_len(name_len) : 0));
	/* mgmt_alloc_skb() can fail under memory pressure; drop the
	 * event rather than dereferencing a NULL skb.
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					    name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

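/* Registration glue: expose the mgmt_handlers table on the HCI control
 * channel so that hci_sock can dispatch management commands here.
 */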
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

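/* Called when a management socket is closed: flush any mesh
 * transmissions that the closing socket still has queued on any
 * controller.
 */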
void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}