/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	22

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

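/* Map a kernel errno value to the closest MGMT status code. Any error
 * without an explicit mapping falls back to MGMT_STATUS_FAILED.
 */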
static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

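/* Convert an error into an MGMT status: negative values are treated as
 * errnos, non-negative values as HCI status codes and looked up in
 * mgmt_status_table.
 */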
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

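/* Translate an MGMT address type into the corresponding HCI LE address
 * type; anything other than public is treated as a random address.
 */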
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

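/* Return the lists of supported command and event opcodes. Untrusted
 * sockets only get the read-only subsets.
 */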
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

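/* Build the list of configured controller indexes. Controllers still in
 * setup or config state, user-channel controllers and raw-only devices
 * are skipped.
 */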
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

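/* Report which supported configuration options (external config,
 * public address) have not been satisfied yet for this controller.
 */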
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

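/* Derive the supported PHY bitmask from the controller's LMP features
 * (BR/EDR packet types and slots) and LE feature bits.
 */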
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

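/* Compute which MGMT settings this controller could support at all,
 * based purely on its capabilities.
 */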
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

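/* Compute the currently active settings from the hdev flags and state. */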
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * the address is actually in use decides if the flag is set.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

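/* Delayed work: when the service cache timeout fires, re-sync the EIR
 * data and class of device so pending UUID changes reach the controller.
 */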
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

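/* Finish a mesh transmit: unless silent, notify user space with a Mesh
 * Packet Complete event, then free the pending TX entry.
 */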
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	if (list_empty(&hdev->adv_instances))
		hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

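/* One-time mgmt initialisation of a controller, performed when the
 * first management socket touches it.
 */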
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set implicitly
	 * so that pairing works for them. For mgmt, however, we
	 * require user-space to explicitly enable it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

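/* Append class of device, appearance and the complete/short local name
 * fields to an EIR buffer, returning the used length.
 */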
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

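/* Completion handler for a queued power change: on success send the
 * settings response and, for power on, a New Settings event; on
 * failure return a command status.
 */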
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_settings for power on, as power off is
		 * deferred to hdev->power_off work which does call
		 * hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

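/* Handle MGMT_OP_SET_POWERED: validate the mode, reject the request
 * while another power change is pending, and queue the actual state
 * change on the cmd_sync machinery.
 */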
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

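/* Check whether BR/EDR based commands are currently acceptable: not
 * supported without BR/EDR capability, rejected while BR/EDR is
 * disabled. mgmt_le_support() below applies the same policy for LE.
 */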
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);

		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}

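/* Handle MGMT_OP_SET_DISCOVERABLE: validate mode and timeout, handle
 * the powered-off and timeout-only cases directly, and otherwise queue
 * the HCI update.
 */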
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);

			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	if (cmd)
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}

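/* Update the connectable setting while the controller is powered off:
 * only the flags are touched and, on change, scanning state is
 * refreshed and a New Settings event is emitted.
 */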
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}

static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}

static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

set_bondable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1800 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1801 u16 len)
1802 {
1803 struct mgmt_mode *cp = data;
1804 bool changed;
1805 int err;
1806
1807 bt_dev_dbg(hdev, "sock %p", sk);
1808
1809 if (cp->val != 0x00 && cp->val != 0x01)
1810 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1811 MGMT_STATUS_INVALID_PARAMS);
1812
1813 hci_dev_lock(hdev);
1814
1815 if (cp->val)
1816 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1817 else
1818 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1819
1820 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1821 if (err < 0)
1822 goto unlock;
1823
1824 if (changed) {
1825 /* In limited privacy mode the change of bondable mode
1826 * may affect the local advertising address.
1827 */
1828 hci_update_discoverable(hdev);
1829
1830 err = new_settings(hdev, sk);
1831 }
1832
1833 unlock:
1834 hci_dev_unlock(hdev);
1835 return err;
1836 }
1837
1838 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1839 u16 len)
1840 {
1841 struct mgmt_mode *cp = data;
1842 struct mgmt_pending_cmd *cmd;
1843 u8 val, status;
1844 int err;
1845
1846 bt_dev_dbg(hdev, "sock %p", sk);
1847
1848 status = mgmt_bredr_support(hdev);
1849 if (status)
1850 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1851 status);
1852
1853 if (cp->val != 0x00 && cp->val != 0x01)
1854 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1855 MGMT_STATUS_INVALID_PARAMS);
1856
1857 hci_dev_lock(hdev);
1858
1859 if (!hdev_is_powered(hdev)) {
1860 bool changed = false;
1861
1862 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1863 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1864 changed = true;
1865 }
1866
1867 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1868 if (err < 0)
1869 goto failed;
1870
1871 if (changed)
1872 err = new_settings(hdev, sk);
1873
1874 goto failed;
1875 }
1876
1877 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1878 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1879 MGMT_STATUS_BUSY);
1880 goto failed;
1881 }
1882
1883 val = !!cp->val;
1884
1885 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1886 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1887 goto failed;
1888 }
1889
1890 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1891 if (!cmd) {
1892 err = -ENOMEM;
1893 goto failed;
1894 }
1895
1896 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1897 if (err < 0) {
1898 mgmt_pending_remove(cmd);
1899 goto failed;
1900 }
1901
1902 failed:
1903 hci_dev_unlock(hdev);
1904 return err;
1905 }
1906
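/* Completion handler for SET_SSP: on failure roll back the
 * HCI_SSP_ENABLED flag and report the error to all pending SET_SSP
 * commands; on success sync the flag with the requested value, emit
 * New Settings if it changed and refresh the EIR data.
 */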
1907 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1908 {
1909 struct cmd_lookup match = { NULL, hdev };
1910 struct mgmt_pending_cmd *cmd = data;
1911 struct mgmt_mode *cp = cmd->param;
1912 u8 enable = cp->val;
1913 bool changed;
1914
1915 /* Make sure cmd still outstanding. */
1916 if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1917 return;
1918
1919 if (err) {
1920 u8 mgmt_err = mgmt_status(err);
1921
1922 if (enable && hci_dev_test_and_clear_flag(hdev,
1923 HCI_SSP_ENABLED)) {
1924 new_settings(hdev, NULL);
1925 }
1926
1927 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true,
1928 cmd_status_rsp, &mgmt_err);
1929 return;
1930 }
1931
1932 if (enable) {
1933 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1934 } else {
1935 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1936 }
1937
1938 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true, settings_rsp, &match);
1939
1940 if (changed)
1941 new_settings(hdev, match.sk);
1942
1943 if (match.sk)
1944 sock_put(match.sk);
1945
1946 hci_update_eir_sync(hdev);
1947 }
1948
1949 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1950 {
1951 struct mgmt_pending_cmd *cmd = data;
1952 struct mgmt_mode *cp = cmd->param;
1953 bool changed = false;
1954 int err;
1955
1956 if (cp->val)
1957 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1958
1959 err = hci_write_ssp_mode_sync(hdev, cp->val);
1960
1961 if (!err && changed)
1962 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1963
1964 return err;
1965 }
1966
1967 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1968 {
1969 struct mgmt_mode *cp = data;
1970 struct mgmt_pending_cmd *cmd;
1971 u8 status;
1972 int err;
1973
1974 bt_dev_dbg(hdev, "sock %p", sk);
1975
1976 status = mgmt_bredr_support(hdev);
1977 if (status)
1978 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1979
1980 if (!lmp_ssp_capable(hdev))
1981 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1982 MGMT_STATUS_NOT_SUPPORTED);
1983
1984 if (cp->val != 0x00 && cp->val != 0x01)
1985 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1986 MGMT_STATUS_INVALID_PARAMS);
1987
1988 hci_dev_lock(hdev);
1989
1990 if (!hdev_is_powered(hdev)) {
1991 bool changed;
1992
1993 if (cp->val) {
1994 changed = !hci_dev_test_and_set_flag(hdev,
1995 HCI_SSP_ENABLED);
1996 } else {
1997 changed = hci_dev_test_and_clear_flag(hdev,
1998 HCI_SSP_ENABLED);
1999 }
2000
2001 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2002 if (err < 0)
2003 goto failed;
2004
2005 if (changed)
2006 err = new_settings(hdev, sk);
2007
2008 goto failed;
2009 }
2010
2011 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2012 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2013 MGMT_STATUS_BUSY);
2014 goto failed;
2015 }
2016
2017 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2018 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2019 goto failed;
2020 }
2021
2022 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2023 if (!cmd)
2024 err = -ENOMEM;
2025 else
2026 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2027 set_ssp_complete);
2028
2029 if (err < 0) {
2030 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2031 MGMT_STATUS_FAILED);
2032
2033 if (cmd)
2034 mgmt_pending_remove(cmd);
2035 }
2036
2037 failed:
2038 hci_dev_unlock(hdev);
2039 return err;
2040 }
2041
2042 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2043 {
2044 bt_dev_dbg(hdev, "sock %p", sk);
2045
2046 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2047 MGMT_STATUS_NOT_SUPPORTED);
2048 }
2049
2050 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2051 {
2052 struct cmd_lookup match = { NULL, hdev };
2053 u8 status = mgmt_status(err);
2054
2055 bt_dev_dbg(hdev, "err %d", err);
2056
2057 if (status) {
2058 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, cmd_status_rsp,
2059 &status);
2060 return;
2061 }
2062
2063 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, settings_rsp, &match);
2064
2065 new_settings(hdev, match.sk);
2066
2067 if (match.sk)
2068 sock_put(match.sk);
2069 }
2070
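/* Apply a SET_LE request to the controller: when disabling, tear down
 * all advertising instances first; when enabling, set the flag and,
 * after writing the LE host support, refresh the advertising and scan
 * response data so the controller has sane defaults.
 */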
2071 static int set_le_sync(struct hci_dev *hdev, void *data)
2072 {
2073 struct mgmt_pending_cmd *cmd = data;
2074 struct mgmt_mode *cp = cmd->param;
2075 u8 val = !!cp->val;
2076 int err;
2077
2078 if (!val) {
2079 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2080
2081 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2082 hci_disable_advertising_sync(hdev);
2083
2084 if (ext_adv_capable(hdev))
2085 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2086 } else {
2087 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2088 }
2089
2090 err = hci_write_le_host_supported_sync(hdev, val, 0);
2091
2092 /* Make sure the controller has a good default for
2093 * advertising data. Restrict the update to when LE
2094 * has actually been enabled. During power on, the
2095 * update in powered_update_hci will take care of it.
2096 */
2097 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2098 if (ext_adv_capable(hdev)) {
2099 int status;
2100
2101 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2102 if (!status)
2103 hci_update_scan_rsp_data_sync(hdev, 0x00);
2104 } else {
2105 hci_update_adv_data_sync(hdev, 0x00);
2106 hci_update_scan_rsp_data_sync(hdev, 0x00);
2107 }
2108
2109 hci_update_passive_scan(hdev);
2110 }
2111
2112 return err;
2113 }
2114
2115 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2116 {
2117 struct mgmt_pending_cmd *cmd = data;
2118 u8 status = mgmt_status(err);
2119 struct sock *sk = cmd->sk;
2120
2121 if (status) {
2122 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
2123 cmd_status_rsp, &status);
2124 return;
2125 }
2126
2127 mgmt_pending_remove(cmd);
2128 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2129 }
2130
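/* Apply the mesh receiver settings: toggle the HCI_MESH flag, store
 * the requested scan interval/window, copy the AD type filters when
 * they fit (otherwise all advertising packets are forwarded) and
 * re-program passive scanning accordingly.
 */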
2131 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2132 {
2133 struct mgmt_pending_cmd *cmd = data;
2134 struct mgmt_cp_set_mesh *cp = cmd->param;
2135 size_t len = cmd->param_len;
2136
2137 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2138
2139 if (cp->enable)
2140 hci_dev_set_flag(hdev, HCI_MESH);
2141 else
2142 hci_dev_clear_flag(hdev, HCI_MESH);
2143
2144 hdev->le_scan_interval = __le16_to_cpu(cp->period);
2145 hdev->le_scan_window = __le16_to_cpu(cp->window);
2146
2147 len -= sizeof(*cp);
2148
2149 /* If filters don't fit, forward all adv pkts */
2150 if (len <= sizeof(hdev->mesh_ad_types))
2151 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2152
2153 hci_update_passive_scan_sync(hdev);
2154 return 0;
2155 }
2156
2157 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2158 {
2159 struct mgmt_cp_set_mesh *cp = data;
2160 struct mgmt_pending_cmd *cmd;
2161 __u16 period, window;
2162 int err = 0;
2163
2164 bt_dev_dbg(hdev, "sock %p", sk);
2165
2166 if (!lmp_le_capable(hdev) ||
2167 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2168 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2169 MGMT_STATUS_NOT_SUPPORTED);
2170
2171 if (cp->enable != 0x00 && cp->enable != 0x01)
2172 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2173 MGMT_STATUS_INVALID_PARAMS);
2174
2175 /* Keep allowed ranges in sync with set_scan_params() */
2176 period = __le16_to_cpu(cp->period);
2177
2178 if (period < 0x0004 || period > 0x4000)
2179 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2180 MGMT_STATUS_INVALID_PARAMS);
2181
2182 window = __le16_to_cpu(cp->window);
2183
2184 if (window < 0x0004 || window > 0x4000)
2185 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2186 MGMT_STATUS_INVALID_PARAMS);
2187
2188 if (window > period)
2189 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2190 MGMT_STATUS_INVALID_PARAMS);
2191
2192 hci_dev_lock(hdev);
2193
2194 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2195 if (!cmd)
2196 err = -ENOMEM;
2197 else
2198 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2199 set_mesh_complete);
2200
2201 if (err < 0) {
2202 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2203 MGMT_STATUS_FAILED);
2204
2205 if (cmd)
2206 mgmt_pending_remove(cmd);
2207 }
2208
2209 hci_dev_unlock(hdev);
2210 return err;
2211 }
2212
2213 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2214 {
2215 struct mgmt_mesh_tx *mesh_tx = data;
2216 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2217 unsigned long mesh_send_interval;
2218 u8 mgmt_err = mgmt_status(err);
2219
2220 /* Report any errors here, but don't report completion */
2221
2222 if (mgmt_err) {
2223 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2224 /* Send Complete Error Code for handle */
2225 mesh_send_complete(hdev, mesh_tx, false);
2226 return;
2227 }
2228
2229 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2230 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2231 mesh_send_interval);
2232 }
2233
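/* Transmit a queued mesh packet by installing it as a temporary
 * advertising instance (one past the controller's configured sets)
 * and scheduling that instance if nothing else is being advertised.
 */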
2234 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2235 {
2236 struct mgmt_mesh_tx *mesh_tx = data;
2237 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2238 struct adv_info *adv, *next_instance;
2239 u8 instance = hdev->le_num_of_adv_sets + 1;
2240 u16 timeout, duration;
2241 int err = 0;
2242
2243 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2244 return MGMT_STATUS_BUSY;
2245
2246 timeout = 1000;
2247 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2248 adv = hci_add_adv_instance(hdev, instance, 0,
2249 send->adv_data_len, send->adv_data,
2250 0, NULL,
2251 timeout, duration,
2252 HCI_ADV_TX_POWER_NO_PREFERENCE,
2253 hdev->le_adv_min_interval,
2254 hdev->le_adv_max_interval,
2255 mesh_tx->handle);
2256
2257 if (!IS_ERR(adv))
2258 mesh_tx->instance = instance;
2259 else
2260 err = PTR_ERR(adv);
2261
2262 if (hdev->cur_adv_instance == instance) {
2263 /* If the currently advertised instance is being changed then
2264 * cancel the current advertising and schedule the next
2265 * instance. If there is only one instance then the overridden
2266 * advertising data will be visible right away.
2267 */
2268 cancel_adv_timeout(hdev);
2269
2270 next_instance = hci_get_next_instance(hdev, instance);
2271 if (next_instance)
2272 instance = next_instance->instance;
2273 else
2274 instance = 0;
2275 } else if (hdev->adv_instance_timeout) {
2276 /* Immediately advertise the new instance if no other one is active, or
2277 * let it be picked up naturally from the queue if advertising is already in progress
2278 */
2279 instance = 0;
2280 }
2281
2282 if (instance)
2283 return hci_schedule_adv_instance_sync(hdev, instance, true);
2284
2285 return err;
2286 }
2287
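/* Iterator callback that collects the handles of outstanding mesh
 * transmissions into a Mesh Read Features response.
 */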
2288 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2289 {
2290 struct mgmt_rp_mesh_read_features *rp = data;
2291
2292 if (rp->used_handles >= rp->max_handles)
2293 return;
2294
2295 rp->handles[rp->used_handles++] = mesh_tx->handle;
2296 }
2297
2298 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2299 void *data, u16 len)
2300 {
2301 struct mgmt_rp_mesh_read_features rp;
2302
2303 if (!lmp_le_capable(hdev) ||
2304 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2305 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2306 MGMT_STATUS_NOT_SUPPORTED);
2307
2308 memset(&rp, 0, sizeof(rp));
2309 rp.index = cpu_to_le16(hdev->id);
2310 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2311 rp.max_handles = MESH_HANDLES_MAX;
2312
2313 hci_dev_lock(hdev);
2314
2315 if (rp.max_handles)
2316 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2317
2318 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2319 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2320
2321 hci_dev_unlock(hdev);
2322 return 0;
2323 }
2324
2325 static int send_cancel(struct hci_dev *hdev, void *data)
2326 {
2327 struct mgmt_pending_cmd *cmd = data;
2328 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2329 struct mgmt_mesh_tx *mesh_tx;
2330
2331 if (!cancel->handle) {
2332 do {
2333 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2334
2335 if (mesh_tx)
2336 mesh_send_complete(hdev, mesh_tx, false);
2337 } while (mesh_tx);
2338 } else {
2339 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2340
2341 if (mesh_tx && mesh_tx->sk == cmd->sk)
2342 mesh_send_complete(hdev, mesh_tx, false);
2343 }
2344
2345 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2346 0, NULL, 0);
2347 mgmt_pending_free(cmd);
2348
2349 return 0;
2350 }
2351
2352 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2353 void *data, u16 len)
2354 {
2355 struct mgmt_pending_cmd *cmd;
2356 int err;
2357
2358 if (!lmp_le_capable(hdev) ||
2359 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2360 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2361 MGMT_STATUS_NOT_SUPPORTED);
2362
2363 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2364 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2365 MGMT_STATUS_REJECTED);
2366
2367 hci_dev_lock(hdev);
2368 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2369 if (!cmd)
2370 err = -ENOMEM;
2371 else
2372 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2373
2374 if (err < 0) {
2375 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2376 MGMT_STATUS_FAILED);
2377
2378 if (cmd)
2379 mgmt_pending_free(cmd);
2380 }
2381
2382 hci_dev_unlock(hdev);
2383 return err;
2384 }
2385
2386 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2387 {
2388 struct mgmt_mesh_tx *mesh_tx;
2389 struct mgmt_cp_mesh_send *send = data;
2390 struct mgmt_rp_mesh_read_features rp;
2391 bool sending;
2392 int err = 0;
2393
2394 if (!lmp_le_capable(hdev) ||
2395 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2396 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2397 MGMT_STATUS_NOT_SUPPORTED);
2398 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2399 len <= MGMT_MESH_SEND_SIZE ||
2400 len > (MGMT_MESH_SEND_SIZE + 31))
2401 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2402 MGMT_STATUS_REJECTED);
2403
2404 hci_dev_lock(hdev);
2405
2406 memset(&rp, 0, sizeof(rp));
2407 rp.max_handles = MESH_HANDLES_MAX;
2408
2409 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2410
2411 if (rp.max_handles <= rp.used_handles) {
2412 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2413 MGMT_STATUS_BUSY);
2414 goto done;
2415 }
2416
2417 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2418 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2419
2420 if (!mesh_tx)
2421 err = -ENOMEM;
2422 else if (!sending)
2423 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2424 mesh_send_start_complete);
2425
2426 if (err < 0) {
2427 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2428 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2429 MGMT_STATUS_FAILED);
2430
2431 if (mesh_tx) {
2432 if (sending)
2433 mgmt_mesh_remove(mesh_tx);
2434 }
2435 } else {
2436 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2437
2438 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2439 &mesh_tx->handle, 1);
2440 }
2441
2442 done:
2443 hci_dev_unlock(hdev);
2444 return err;
2445 }
2446
2447 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2448 {
2449 struct mgmt_mode *cp = data;
2450 struct mgmt_pending_cmd *cmd;
2451 int err;
2452 u8 val, enabled;
2453
2454 bt_dev_dbg(hdev, "sock %p", sk);
2455
2456 if (!lmp_le_capable(hdev))
2457 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2458 MGMT_STATUS_NOT_SUPPORTED);
2459
2460 if (cp->val != 0x00 && cp->val != 0x01)
2461 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2462 MGMT_STATUS_INVALID_PARAMS);
2463
2464 /* Bluetooth single-mode LE-only controllers, as well as dual-mode
2465 * controllers configured as LE-only devices, do not allow switching
2466 * LE off. Such controllers either have LE enabled explicitly or have
2467 * had BR/EDR switched off previously.
2468 *
2469 * When trying to enable LE while it is already enabled, gracefully
2470 * send a positive response. Trying to disable it, however, results
2471 * in rejection.
2472 */
2473 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2474 if (cp->val == 0x01)
2475 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2476
2477 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2478 MGMT_STATUS_REJECTED);
2479 }
2480
2481 hci_dev_lock(hdev);
2482
2483 val = !!cp->val;
2484 enabled = lmp_host_le_capable(hdev);
2485
2486 if (!hdev_is_powered(hdev) || val == enabled) {
2487 bool changed = false;
2488
2489 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2490 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2491 changed = true;
2492 }
2493
2494 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2495 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2496 changed = true;
2497 }
2498
2499 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2500 if (err < 0)
2501 goto unlock;
2502
2503 if (changed)
2504 err = new_settings(hdev, sk);
2505
2506 goto unlock;
2507 }
2508
2509 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2510 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2511 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2512 MGMT_STATUS_BUSY);
2513 goto unlock;
2514 }
2515
2516 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2517 if (!cmd)
2518 err = -ENOMEM;
2519 else
2520 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2521 set_le_complete);
2522
2523 if (err < 0) {
2524 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2525 MGMT_STATUS_FAILED);
2526
2527 if (cmd)
2528 mgmt_pending_remove(cmd);
2529 }
2530
2531 unlock:
2532 hci_dev_unlock(hdev);
2533 return err;
2534 }
2535
2536 /* This is a helper function to test for pending mgmt commands that can
2537 * cause CoD or EIR HCI commands. We can only allow one such pending
2538 * mgmt command at a time since otherwise we cannot easily track what
2539 * the current values are and will be, and based on that calculate
2540 * whether a new HCI command needs to be sent and, if so, with what value.
2541 */
2542 static bool pending_eir_or_class(struct hci_dev *hdev)
2543 {
2544 struct mgmt_pending_cmd *cmd;
2545
2546 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2547 switch (cmd->opcode) {
2548 case MGMT_OP_ADD_UUID:
2549 case MGMT_OP_REMOVE_UUID:
2550 case MGMT_OP_SET_DEV_CLASS:
2551 case MGMT_OP_SET_POWERED:
2552 return true;
2553 }
2554 }
2555
2556 return false;
2557 }
2558
2559 static const u8 bluetooth_base_uuid[] = {
2560 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2561 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2562 };
2563
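/* Determine the smallest representation of a 128-bit UUID: if it is
 * based on the Bluetooth Base UUID it can be shortened to a 32-bit or
 * 16-bit value, otherwise the full 128 bits are needed.
 */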
2564 static u8 get_uuid_size(const u8 *uuid)
2565 {
2566 u32 val;
2567
2568 if (memcmp(uuid, bluetooth_base_uuid, 12))
2569 return 128;
2570
2571 val = get_unaligned_le32(&uuid[12]);
2572 if (val > 0xffff)
2573 return 32;
2574
2575 return 16;
2576 }
2577
2578 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2579 {
2580 struct mgmt_pending_cmd *cmd = data;
2581
2582 bt_dev_dbg(hdev, "err %d", err);
2583
2584 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
2585 mgmt_status(err), hdev->dev_class, 3);
2586
2587 mgmt_pending_free(cmd);
2588 }
2589
2590 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2591 {
2592 int err;
2593
2594 err = hci_update_class_sync(hdev);
2595 if (err)
2596 return err;
2597
2598 return hci_update_eir_sync(hdev);
2599 }
2600
2601 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2602 {
2603 struct mgmt_cp_add_uuid *cp = data;
2604 struct mgmt_pending_cmd *cmd;
2605 struct bt_uuid *uuid;
2606 int err;
2607
2608 bt_dev_dbg(hdev, "sock %p", sk);
2609
2610 hci_dev_lock(hdev);
2611
2612 if (pending_eir_or_class(hdev)) {
2613 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2614 MGMT_STATUS_BUSY);
2615 goto failed;
2616 }
2617
2618 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2619 if (!uuid) {
2620 err = -ENOMEM;
2621 goto failed;
2622 }
2623
2624 memcpy(uuid->uuid, cp->uuid, 16);
2625 uuid->svc_hint = cp->svc_hint;
2626 uuid->size = get_uuid_size(cp->uuid);
2627
2628 list_add_tail(&uuid->list, &hdev->uuids);
2629
2630 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2631 if (!cmd) {
2632 err = -ENOMEM;
2633 goto failed;
2634 }
2635
2636 /* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running, so
2637 * use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2638 */
2639 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2640 mgmt_class_complete);
2641 if (err < 0) {
2642 mgmt_pending_free(cmd);
2643 goto failed;
2644 }
2645
2646 failed:
2647 hci_dev_unlock(hdev);
2648 return err;
2649 }
2650
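/* Arm the service cache timer so that class and EIR updates get
 * batched; returns true if the cache was newly enabled, false if the
 * adapter is powered off or the cache was already active.
 */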
2651 static bool enable_service_cache(struct hci_dev *hdev)
2652 {
2653 if (!hdev_is_powered(hdev))
2654 return false;
2655
2656 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2657 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2658 CACHE_TIMEOUT);
2659 return true;
2660 }
2661
2662 return false;
2663 }
2664
2665 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2666 {
2667 int err;
2668
2669 err = hci_update_class_sync(hdev);
2670 if (err)
2671 return err;
2672
2673 return hci_update_eir_sync(hdev);
2674 }
2675
2676 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2677 u16 len)
2678 {
2679 struct mgmt_cp_remove_uuid *cp = data;
2680 struct mgmt_pending_cmd *cmd;
2681 struct bt_uuid *match, *tmp;
2682 static const u8 bt_uuid_any[] = {
2683 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2684 };
2685 int err, found;
2686
2687 bt_dev_dbg(hdev, "sock %p", sk);
2688
2689 hci_dev_lock(hdev);
2690
2691 if (pending_eir_or_class(hdev)) {
2692 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2693 MGMT_STATUS_BUSY);
2694 goto unlock;
2695 }
2696
2697 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2698 hci_uuids_clear(hdev);
2699
2700 if (enable_service_cache(hdev)) {
2701 err = mgmt_cmd_complete(sk, hdev->id,
2702 MGMT_OP_REMOVE_UUID,
2703 0, hdev->dev_class, 3);
2704 goto unlock;
2705 }
2706
2707 goto update_class;
2708 }
2709
2710 found = 0;
2711
2712 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2713 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2714 continue;
2715
2716 list_del(&match->list);
2717 kfree(match);
2718 found++;
2719 }
2720
2721 if (found == 0) {
2722 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2723 MGMT_STATUS_INVALID_PARAMS);
2724 goto unlock;
2725 }
2726
2727 update_class:
2728 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2729 if (!cmd) {
2730 err = -ENOMEM;
2731 goto unlock;
2732 }
2733
2734 /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running, so
2735 * use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2736 */
2737 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2738 mgmt_class_complete);
2739 if (err < 0)
2740 mgmt_pending_free(cmd);
2741
2742 unlock:
2743 hci_dev_unlock(hdev);
2744 return err;
2745 }
2746
2747 static int set_class_sync(struct hci_dev *hdev, void *data)
2748 {
2749 int err = 0;
2750
2751 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2752 cancel_delayed_work_sync(&hdev->service_cache);
2753 err = hci_update_eir_sync(hdev);
2754 }
2755
2756 if (err)
2757 return err;
2758
2759 return hci_update_class_sync(hdev);
2760 }
2761
2762 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2763 u16 len)
2764 {
2765 struct mgmt_cp_set_dev_class *cp = data;
2766 struct mgmt_pending_cmd *cmd;
2767 int err;
2768
2769 bt_dev_dbg(hdev, "sock %p", sk);
2770
2771 if (!lmp_bredr_capable(hdev))
2772 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2773 MGMT_STATUS_NOT_SUPPORTED);
2774
2775 hci_dev_lock(hdev);
2776
2777 if (pending_eir_or_class(hdev)) {
2778 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2779 MGMT_STATUS_BUSY);
2780 goto unlock;
2781 }
2782
2783 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2784 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2785 MGMT_STATUS_INVALID_PARAMS);
2786 goto unlock;
2787 }
2788
2789 hdev->major_class = cp->major;
2790 hdev->minor_class = cp->minor;
2791
2792 if (!hdev_is_powered(hdev)) {
2793 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2794 hdev->dev_class, 3);
2795 goto unlock;
2796 }
2797
2798 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2799 if (!cmd) {
2800 err = -ENOMEM;
2801 goto unlock;
2802 }
2803
2804 /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running,
2805 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2806 */
2807 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2808 mgmt_class_complete);
2809 if (err < 0)
2810 mgmt_pending_free(cmd);
2811
2812 unlock:
2813 hci_dev_unlock(hdev);
2814 return err;
2815 }
2816
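/* Replace the adapter's stored BR/EDR link keys with the supplied
 * list, skipping blocked keys, entries with an invalid address or key
 * type, and debug combination keys (which always require a fresh
 * pairing).
 */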
2817 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2818 u16 len)
2819 {
2820 struct mgmt_cp_load_link_keys *cp = data;
2821 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2822 sizeof(struct mgmt_link_key_info));
2823 u16 key_count, expected_len;
2824 bool changed;
2825 int i;
2826
2827 bt_dev_dbg(hdev, "sock %p", sk);
2828
2829 if (!lmp_bredr_capable(hdev))
2830 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2831 MGMT_STATUS_NOT_SUPPORTED);
2832
2833 key_count = __le16_to_cpu(cp->key_count);
2834 if (key_count > max_key_count) {
2835 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2836 key_count);
2837 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2838 MGMT_STATUS_INVALID_PARAMS);
2839 }
2840
2841 expected_len = struct_size(cp, keys, key_count);
2842 if (expected_len != len) {
2843 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2844 expected_len, len);
2845 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2846 MGMT_STATUS_INVALID_PARAMS);
2847 }
2848
2849 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2850 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2851 MGMT_STATUS_INVALID_PARAMS);
2852
2853 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2854 key_count);
2855
2856 hci_dev_lock(hdev);
2857
2858 hci_link_keys_clear(hdev);
2859
2860 if (cp->debug_keys)
2861 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2862 else
2863 changed = hci_dev_test_and_clear_flag(hdev,
2864 HCI_KEEP_DEBUG_KEYS);
2865
2866 if (changed)
2867 new_settings(hdev, NULL);
2868
2869 for (i = 0; i < key_count; i++) {
2870 struct mgmt_link_key_info *key = &cp->keys[i];
2871
2872 if (hci_is_blocked_key(hdev,
2873 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2874 key->val)) {
2875 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2876 &key->addr.bdaddr);
2877 continue;
2878 }
2879
2880 if (key->addr.type != BDADDR_BREDR) {
2881 bt_dev_warn(hdev,
2882 "Invalid link address type %u for %pMR",
2883 key->addr.type, &key->addr.bdaddr);
2884 continue;
2885 }
2886
2887 if (key->type > 0x08) {
2888 bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
2889 key->type, &key->addr.bdaddr);
2890 continue;
2891 }
2892
2893 /* Always ignore debug keys and require a new pairing if
2894 * the user wants to use them.
2895 */
2896 if (key->type == HCI_LK_DEBUG_COMBINATION)
2897 continue;
2898
2899 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2900 key->type, key->pin_len, NULL);
2901 }
2902
2903 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2904
2905 hci_dev_unlock(hdev);
2906
2907 return 0;
2908 }
2909
2910 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2911 u8 addr_type, struct sock *skip_sk)
2912 {
2913 struct mgmt_ev_device_unpaired ev;
2914
2915 bacpy(&ev.addr.bdaddr, bdaddr);
2916 ev.addr.type = addr_type;
2917
2918 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2919 skip_sk);
2920 }
2921
2922 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2923 {
2924 struct mgmt_pending_cmd *cmd = data;
2925 struct mgmt_cp_unpair_device *cp = cmd->param;
2926
2927 if (!err)
2928 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2929
2930 cmd->cmd_complete(cmd, err);
2931 mgmt_pending_free(cmd);
2932 }
2933
2934 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2935 {
2936 struct mgmt_pending_cmd *cmd = data;
2937 struct mgmt_cp_unpair_device *cp = cmd->param;
2938 struct hci_conn *conn;
2939
2940 if (cp->addr.type == BDADDR_BREDR)
2941 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2942 &cp->addr.bdaddr);
2943 else
2944 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2945 le_addr_type(cp->addr.type));
2946
2947 if (!conn)
2948 return 0;
2949
2950 /* Disregard any possible error since the likes of hci_abort_conn_sync
2951 * will clean up the connection no matter the error.
2952 */
2953 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2954
2955 return 0;
2956 }
2957
2958 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2959 u16 len)
2960 {
2961 struct mgmt_cp_unpair_device *cp = data;
2962 struct mgmt_rp_unpair_device rp;
2963 struct hci_conn_params *params;
2964 struct mgmt_pending_cmd *cmd;
2965 struct hci_conn *conn;
2966 u8 addr_type;
2967 int err;
2968
2969 memset(&rp, 0, sizeof(rp));
2970 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2971 rp.addr.type = cp->addr.type;
2972
2973 if (!bdaddr_type_is_valid(cp->addr.type))
2974 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2975 MGMT_STATUS_INVALID_PARAMS,
2976 &rp, sizeof(rp));
2977
2978 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2979 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2980 MGMT_STATUS_INVALID_PARAMS,
2981 &rp, sizeof(rp));
2982
2983 hci_dev_lock(hdev);
2984
2985 if (!hdev_is_powered(hdev)) {
2986 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2987 MGMT_STATUS_NOT_POWERED, &rp,
2988 sizeof(rp));
2989 goto unlock;
2990 }
2991
2992 if (cp->addr.type == BDADDR_BREDR) {
2993 /* If disconnection is requested, then look up the
2994 * connection. If the remote device is connected, it
2995 * will be later used to terminate the link.
2996 *
2997 * Setting it to NULL explicitly will cause no
2998 * termination of the link.
2999 */
3000 if (cp->disconnect)
3001 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3002 &cp->addr.bdaddr);
3003 else
3004 conn = NULL;
3005
3006 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3007 if (err < 0) {
3008 err = mgmt_cmd_complete(sk, hdev->id,
3009 MGMT_OP_UNPAIR_DEVICE,
3010 MGMT_STATUS_NOT_PAIRED, &rp,
3011 sizeof(rp));
3012 goto unlock;
3013 }
3014
3015 goto done;
3016 }
3017
3018 /* LE address type */
3019 addr_type = le_addr_type(cp->addr.type);
3020
3021 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3022 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3023 if (err < 0) {
3024 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3025 MGMT_STATUS_NOT_PAIRED, &rp,
3026 sizeof(rp));
3027 goto unlock;
3028 }
3029
3030 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3031 if (!conn) {
3032 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3033 goto done;
3034 }
3035
3036
3037 /* Defer clearing the connection parameters until the connection closes
3038 * to give a chance of keeping them if a re-pairing happens.
3039 */
3040 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3041
3042 /* Disable auto-connection parameters if present */
3043 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3044 if (params) {
3045 if (params->explicit_connect)
3046 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3047 else
3048 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3049 }
3050
3051 /* If disconnection is not requested, then clear the connection
3052 * variable so that the link is not terminated.
3053 */
3054 if (!cp->disconnect)
3055 conn = NULL;
3056
3057 done:
3058 /* If the connection variable is set, then termination of the
3059 * link is requested.
3060 */
3061 if (!conn) {
3062 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3063 &rp, sizeof(rp));
3064 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3065 goto unlock;
3066 }
3067
3068 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3069 sizeof(*cp));
3070 if (!cmd) {
3071 err = -ENOMEM;
3072 goto unlock;
3073 }
3074
3075 cmd->cmd_complete = addr_cmd_complete;
3076
3077 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3078 unpair_device_complete);
3079 if (err < 0)
3080 mgmt_pending_free(cmd);
3081
3082 unlock:
3083 hci_dev_unlock(hdev);
3084 return err;
3085 }
3086
3087 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3088 {
3089 struct mgmt_pending_cmd *cmd = data;
3090
3091 cmd->cmd_complete(cmd, mgmt_status(err));
3092 mgmt_pending_free(cmd);
3093 }
3094
3095 static int disconnect_sync(struct hci_dev *hdev, void *data)
3096 {
3097 struct mgmt_pending_cmd *cmd = data;
3098 struct mgmt_cp_disconnect *cp = cmd->param;
3099 struct hci_conn *conn;
3100
3101 if (cp->addr.type == BDADDR_BREDR)
3102 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3103 &cp->addr.bdaddr);
3104 else
3105 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3106 le_addr_type(cp->addr.type));
3107
3108 if (!conn)
3109 return -ENOTCONN;
3110
3111 /* Disregard any possible error since the likes of hci_abort_conn_sync
3112 * will clean up the connection no matter the error.
3113 */
3114 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3115
3116 return 0;
3117 }
3118
3119 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3120 u16 len)
3121 {
3122 struct mgmt_cp_disconnect *cp = data;
3123 struct mgmt_rp_disconnect rp;
3124 struct mgmt_pending_cmd *cmd;
3125 int err;
3126
3127 bt_dev_dbg(hdev, "sock %p", sk);
3128
3129 memset(&rp, 0, sizeof(rp));
3130 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3131 rp.addr.type = cp->addr.type;
3132
3133 if (!bdaddr_type_is_valid(cp->addr.type))
3134 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3135 MGMT_STATUS_INVALID_PARAMS,
3136 &rp, sizeof(rp));
3137
3138 hci_dev_lock(hdev);
3139
3140 if (!test_bit(HCI_UP, &hdev->flags)) {
3141 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3142 MGMT_STATUS_NOT_POWERED, &rp,
3143 sizeof(rp));
3144 goto failed;
3145 }
3146
3147 cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3148 if (!cmd) {
3149 err = -ENOMEM;
3150 goto failed;
3151 }
3152
3153 cmd->cmd_complete = generic_cmd_complete;
3154
3155 err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3156 disconnect_complete);
3157 if (err < 0)
3158 mgmt_pending_free(cmd);
3159
3160 failed:
3161 hci_dev_unlock(hdev);
3162 return err;
3163 }
3164
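/* Map an HCI link type and address type to the corresponding mgmt
 * BDADDR_* address type used in management messages.
 */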
3165 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3166 {
3167 switch (link_type) {
3168 case ISO_LINK:
3169 case LE_LINK:
3170 switch (addr_type) {
3171 case ADDR_LE_DEV_PUBLIC:
3172 return BDADDR_LE_PUBLIC;
3173
3174 default:
3175 /* Fallback to LE Random address type */
3176 return BDADDR_LE_RANDOM;
3177 }
3178
3179 default:
3180 /* Fallback to BR/EDR type */
3181 return BDADDR_BREDR;
3182 }
3183 }
3184
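/* First count the connections flagged as MGMT connected to size the
 * response, then fill it in while filtering out SCO/eSCO links; the
 * reply length is computed from the entries actually written.
 */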
3185 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3186 u16 data_len)
3187 {
3188 struct mgmt_rp_get_connections *rp;
3189 struct hci_conn *c;
3190 int err;
3191 u16 i;
3192
3193 bt_dev_dbg(hdev, "sock %p", sk);
3194
3195 hci_dev_lock(hdev);
3196
3197 if (!hdev_is_powered(hdev)) {
3198 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3199 MGMT_STATUS_NOT_POWERED);
3200 goto unlock;
3201 }
3202
3203 i = 0;
3204 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3205 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3206 i++;
3207 }
3208
3209 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3210 if (!rp) {
3211 err = -ENOMEM;
3212 goto unlock;
3213 }
3214
3215 i = 0;
3216 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3217 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3218 continue;
3219 bacpy(&rp->addr[i].bdaddr, &c->dst);
3220 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3221 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3222 continue;
3223 i++;
3224 }
3225
3226 rp->conn_count = cpu_to_le16(i);
3227
3228 /* Recalculate length in case of filtered SCO connections, etc */
3229 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3230 struct_size(rp, addr, i));
3231
3232 kfree(rp);
3233
3234 unlock:
3235 hci_dev_unlock(hdev);
3236 return err;
3237 }
3238
3239 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3240 struct mgmt_cp_pin_code_neg_reply *cp)
3241 {
3242 struct mgmt_pending_cmd *cmd;
3243 int err;
3244
3245 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3246 sizeof(*cp));
3247 if (!cmd)
3248 return -ENOMEM;
3249
3250 cmd->cmd_complete = addr_cmd_complete;
3251
3252 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3253 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3254 if (err < 0)
3255 mgmt_pending_remove(cmd);
3256
3257 return err;
3258 }
3259
3260 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3261 u16 len)
3262 {
3263 struct hci_conn *conn;
3264 struct mgmt_cp_pin_code_reply *cp = data;
3265 struct hci_cp_pin_code_reply reply;
3266 struct mgmt_pending_cmd *cmd;
3267 int err;
3268
3269 bt_dev_dbg(hdev, "sock %p", sk);
3270
3271 hci_dev_lock(hdev);
3272
3273 if (!hdev_is_powered(hdev)) {
3274 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3275 MGMT_STATUS_NOT_POWERED);
3276 goto failed;
3277 }
3278
3279 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3280 if (!conn) {
3281 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3282 MGMT_STATUS_NOT_CONNECTED);
3283 goto failed;
3284 }
3285
3286 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3287 struct mgmt_cp_pin_code_neg_reply ncp;
3288
3289 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3290
3291 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3292
3293 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3294 if (err >= 0)
3295 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3296 MGMT_STATUS_INVALID_PARAMS);
3297
3298 goto failed;
3299 }
3300
3301 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3302 if (!cmd) {
3303 err = -ENOMEM;
3304 goto failed;
3305 }
3306
3307 cmd->cmd_complete = addr_cmd_complete;
3308
3309 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3310 reply.pin_len = cp->pin_len;
3311 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3312
3313 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3314 if (err < 0)
3315 mgmt_pending_remove(cmd);
3316
3317 failed:
3318 hci_dev_unlock(hdev);
3319 return err;
3320 }
3321
3322 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3323 u16 len)
3324 {
3325 struct mgmt_cp_set_io_capability *cp = data;
3326
3327 bt_dev_dbg(hdev, "sock %p", sk);
3328
3329 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3330 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3331 MGMT_STATUS_INVALID_PARAMS);
3332
3333 hci_dev_lock(hdev);
3334
3335 hdev->io_capability = cp->io_capability;
3336
3337 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3338
3339 hci_dev_unlock(hdev);
3340
3341 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3342 NULL, 0);
3343 }
3344
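/* Look up the pending PAIR_DEVICE command, if any, that is attached
 * to the given connection.
 */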
3345 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3346 {
3347 struct hci_dev *hdev = conn->hdev;
3348 struct mgmt_pending_cmd *cmd;
3349
3350 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3351 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3352 continue;
3353
3354 if (cmd->user_data != conn)
3355 continue;
3356
3357 return cmd;
3358 }
3359
3360 return NULL;
3361 }
3362
3363 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3364 {
3365 struct mgmt_rp_pair_device rp;
3366 struct hci_conn *conn = cmd->user_data;
3367 int err;
3368
3369 bacpy(&rp.addr.bdaddr, &conn->dst);
3370 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3371
3372 err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
3373 status, &rp, sizeof(rp));
3374
3375 /* So we don't get further callbacks for this connection */
3376 conn->connect_cfm_cb = NULL;
3377 conn->security_cfm_cb = NULL;
3378 conn->disconn_cfm_cb = NULL;
3379
3380 hci_conn_drop(conn);
3381
3382 /* The device is paired so there is no need to remove
3383 * its connection parameters anymore.
3384 */
3385 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3386
3387 hci_conn_put(conn);
3388
3389 return err;
3390 }
3391
3392 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3393 {
3394 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3395 struct mgmt_pending_cmd *cmd;
3396
3397 cmd = find_pairing(conn);
3398 if (cmd) {
3399 cmd->cmd_complete(cmd, status);
3400 mgmt_pending_remove(cmd);
3401 }
3402 }
3403
3404 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3405 {
3406 struct mgmt_pending_cmd *cmd;
3407
3408 BT_DBG("status %u", status);
3409
3410 cmd = find_pairing(conn);
3411 if (!cmd) {
3412 BT_DBG("Unable to find a pending command");
3413 return;
3414 }
3415
3416 cmd->cmd_complete(cmd, mgmt_status(status));
3417 mgmt_pending_remove(cmd);
3418 }
3419
3420 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3421 {
3422 struct mgmt_pending_cmd *cmd;
3423
3424 BT_DBG("status %u", status);
3425
3426 if (!status)
3427 return;
3428
3429 cmd = find_pairing(conn);
3430 if (!cmd) {
3431 BT_DBG("Unable to find a pending command");
3432 return;
3433 }
3434
3435 cmd->cmd_complete(cmd, mgmt_status(status));
3436 mgmt_pending_remove(cmd);
3437 }
3438
3439 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3440 u16 len)
3441 {
3442 struct mgmt_cp_pair_device *cp = data;
3443 struct mgmt_rp_pair_device rp;
3444 struct mgmt_pending_cmd *cmd;
3445 u8 sec_level, auth_type;
3446 struct hci_conn *conn;
3447 int err;
3448
3449 bt_dev_dbg(hdev, "sock %p", sk);
3450
3451 memset(&rp, 0, sizeof(rp));
3452 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3453 rp.addr.type = cp->addr.type;
3454
3455 if (!bdaddr_type_is_valid(cp->addr.type))
3456 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3457 MGMT_STATUS_INVALID_PARAMS,
3458 &rp, sizeof(rp));
3459
3460 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3461 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3462 MGMT_STATUS_INVALID_PARAMS,
3463 &rp, sizeof(rp));
3464
3465 hci_dev_lock(hdev);
3466
3467 if (!hdev_is_powered(hdev)) {
3468 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3469 MGMT_STATUS_NOT_POWERED, &rp,
3470 sizeof(rp));
3471 goto unlock;
3472 }
3473
3474 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3475 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3476 MGMT_STATUS_ALREADY_PAIRED, &rp,
3477 sizeof(rp));
3478 goto unlock;
3479 }
3480
3481 sec_level = BT_SECURITY_MEDIUM;
3482 auth_type = HCI_AT_DEDICATED_BONDING;
3483
3484 if (cp->addr.type == BDADDR_BREDR) {
3485 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3486 auth_type, CONN_REASON_PAIR_DEVICE);
3487 } else {
3488 u8 addr_type = le_addr_type(cp->addr.type);
3489 struct hci_conn_params *p;
3490
3491 /* When pairing a new device, it is expected to remember
3492 * this device for future connections. Adding the connection
3493 * parameter information ahead of time allows tracking
3494 * of the peripheral preferred values and will speed up any
3495 * further connection establishment.
3496 *
3497 * If connection parameters already exist, then they
3498 * will be kept and this function does nothing.
3499 */
3500 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3501 if (!p) {
3502 err = -EIO;
3503 goto unlock;
3504 }
3505
3506 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3507 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3508
3509 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3510 sec_level, HCI_LE_CONN_TIMEOUT,
3511 CONN_REASON_PAIR_DEVICE);
3512 }
3513
3514 if (IS_ERR(conn)) {
3515 int status;
3516
3517 if (PTR_ERR(conn) == -EBUSY)
3518 status = MGMT_STATUS_BUSY;
3519 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3520 status = MGMT_STATUS_NOT_SUPPORTED;
3521 else if (PTR_ERR(conn) == -ECONNREFUSED)
3522 status = MGMT_STATUS_REJECTED;
3523 else
3524 status = MGMT_STATUS_CONNECT_FAILED;
3525
3526 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3527 status, &rp, sizeof(rp));
3528 goto unlock;
3529 }
3530
3531 if (conn->connect_cfm_cb) {
3532 hci_conn_drop(conn);
3533 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3534 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3535 goto unlock;
3536 }
3537
3538 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3539 if (!cmd) {
3540 err = -ENOMEM;
3541 hci_conn_drop(conn);
3542 goto unlock;
3543 }
3544
3545 cmd->cmd_complete = pairing_complete;
3546
3547 /* For LE, just connecting isn't proof that the pairing finished */
3548 if (cp->addr.type == BDADDR_BREDR) {
3549 conn->connect_cfm_cb = pairing_complete_cb;
3550 conn->security_cfm_cb = pairing_complete_cb;
3551 conn->disconn_cfm_cb = pairing_complete_cb;
3552 } else {
3553 conn->connect_cfm_cb = le_pairing_complete_cb;
3554 conn->security_cfm_cb = le_pairing_complete_cb;
3555 conn->disconn_cfm_cb = le_pairing_complete_cb;
3556 }
3557
3558 conn->io_capability = cp->io_cap;
3559 cmd->user_data = hci_conn_get(conn);
3560
3561 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3562 hci_conn_security(conn, sec_level, auth_type, true)) {
3563 cmd->cmd_complete(cmd, 0);
3564 mgmt_pending_remove(cmd);
3565 }
3566
3567 err = 0;
3568
3569 unlock:
3570 hci_dev_unlock(hdev);
3571 return err;
3572 }
3573
3574 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3575 u16 len)
3576 {
3577 struct mgmt_addr_info *addr = data;
3578 struct mgmt_pending_cmd *cmd;
3579 struct hci_conn *conn;
3580 int err;
3581
3582 bt_dev_dbg(hdev, "sock %p", sk);
3583
3584 hci_dev_lock(hdev);
3585
3586 if (!hdev_is_powered(hdev)) {
3587 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3588 MGMT_STATUS_NOT_POWERED);
3589 goto unlock;
3590 }
3591
3592 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3593 if (!cmd) {
3594 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3595 MGMT_STATUS_INVALID_PARAMS);
3596 goto unlock;
3597 }
3598
3599 conn = cmd->user_data;
3600
3601 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3602 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3603 MGMT_STATUS_INVALID_PARAMS);
3604 goto unlock;
3605 }
3606
3607 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3608 mgmt_pending_remove(cmd);
3609
3610 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3611 addr, sizeof(*addr));
3612
3613 /* Since the user doesn't want to proceed with the connection, abort any
3614 * ongoing pairing and then terminate the link if it was created
3615 * because of the pair device action.
3616 */
3617 if (addr->type == BDADDR_BREDR)
3618 hci_remove_link_key(hdev, &addr->bdaddr);
3619 else
3620 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3621 le_addr_type(addr->type));
3622
3623 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3624 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3625
3626 unlock:
3627 hci_dev_unlock(hdev);
3628 return err;
3629 }
3630
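/* Common handler for the user confirmation and passkey (negative)
 * replies: for LE connections the reply is routed through SMP, while
 * for BR/EDR the corresponding HCI command is sent and tracked as a
 * pending command.
 */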
3631 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3632 struct mgmt_addr_info *addr, u16 mgmt_op,
3633 u16 hci_op, __le32 passkey)
3634 {
3635 struct mgmt_pending_cmd *cmd;
3636 struct hci_conn *conn;
3637 int err;
3638
3639 hci_dev_lock(hdev);
3640
3641 if (!hdev_is_powered(hdev)) {
3642 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3643 MGMT_STATUS_NOT_POWERED, addr,
3644 sizeof(*addr));
3645 goto done;
3646 }
3647
3648 if (addr->type == BDADDR_BREDR)
3649 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3650 else
3651 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3652 le_addr_type(addr->type));
3653
3654 if (!conn) {
3655 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3656 MGMT_STATUS_NOT_CONNECTED, addr,
3657 sizeof(*addr));
3658 goto done;
3659 }
3660
3661 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3662 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3663 if (!err)
3664 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3665 MGMT_STATUS_SUCCESS, addr,
3666 sizeof(*addr));
3667 else
3668 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3669 MGMT_STATUS_FAILED, addr,
3670 sizeof(*addr));
3671
3672 goto done;
3673 }
3674
3675 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3676 if (!cmd) {
3677 err = -ENOMEM;
3678 goto done;
3679 }
3680
3681 cmd->cmd_complete = addr_cmd_complete;
3682
3683 /* Continue with pairing via HCI */
3684 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3685 struct hci_cp_user_passkey_reply cp;
3686
3687 bacpy(&cp.bdaddr, &addr->bdaddr);
3688 cp.passkey = passkey;
3689 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3690 } else
3691 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3692 &addr->bdaddr);
3693
3694 if (err < 0)
3695 mgmt_pending_remove(cmd);
3696
3697 done:
3698 hci_dev_unlock(hdev);
3699 return err;
3700 }
3701
3702 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3703 void *data, u16 len)
3704 {
3705 struct mgmt_cp_pin_code_neg_reply *cp = data;
3706
3707 bt_dev_dbg(hdev, "sock %p", sk);
3708
3709 return user_pairing_resp(sk, hdev, &cp->addr,
3710 MGMT_OP_PIN_CODE_NEG_REPLY,
3711 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3712 }
3713
3714 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3715 u16 len)
3716 {
3717 struct mgmt_cp_user_confirm_reply *cp = data;
3718
3719 bt_dev_dbg(hdev, "sock %p", sk);
3720
3721 if (len != sizeof(*cp))
3722 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3723 MGMT_STATUS_INVALID_PARAMS);
3724
3725 return user_pairing_resp(sk, hdev, &cp->addr,
3726 MGMT_OP_USER_CONFIRM_REPLY,
3727 HCI_OP_USER_CONFIRM_REPLY, 0);
3728 }
3729
3730 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3731 void *data, u16 len)
3732 {
3733 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3734
3735 bt_dev_dbg(hdev, "sock %p", sk);
3736
3737 return user_pairing_resp(sk, hdev, &cp->addr,
3738 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3739 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3740 }
3741
3742 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3743 u16 len)
3744 {
3745 struct mgmt_cp_user_passkey_reply *cp = data;
3746
3747 bt_dev_dbg(hdev, "sock %p", sk);
3748
3749 return user_pairing_resp(sk, hdev, &cp->addr,
3750 MGMT_OP_USER_PASSKEY_REPLY,
3751 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3752 }
3753
3754 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3755 void *data, u16 len)
3756 {
3757 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3758
3759 bt_dev_dbg(hdev, "sock %p", sk);
3760
3761 return user_pairing_resp(sk, hdev, &cp->addr,
3762 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3763 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3764 }
3765
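/* If the current advertising instance uses any of the given flags,
 * cancel its timeout and schedule the next instance so that stale
 * data (e.g. an outdated local name) is not advertised any longer.
 */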
3766 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3767 {
3768 struct adv_info *adv_instance;
3769
3770 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3771 if (!adv_instance)
3772 return 0;
3773
3774 /* stop if current instance doesn't need to be changed */
3775 if (!(adv_instance->flags & flags))
3776 return 0;
3777
3778 cancel_adv_timeout(hdev);
3779
3780 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3781 if (!adv_instance)
3782 return 0;
3783
3784 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3785
3786 return 0;
3787 }
3788
name_changed_sync(struct hci_dev * hdev,void * data)3789 static int name_changed_sync(struct hci_dev *hdev, void *data)
3790 {
3791 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3792 }
3793
set_name_complete(struct hci_dev * hdev,void * data,int err)3794 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3795 {
3796 struct mgmt_pending_cmd *cmd = data;
3797 struct mgmt_cp_set_local_name *cp = cmd->param;
3798 u8 status = mgmt_status(err);
3799
3800 bt_dev_dbg(hdev, "err %d", err);
3801
3802 if (err == -ECANCELED ||
3803 cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3804 return;
3805
3806 if (status) {
3807 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3808 status);
3809 } else {
3810 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3811 cp, sizeof(*cp));
3812
3813 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3814 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3815 }
3816
3817 mgmt_pending_remove(cmd);
3818 }
3819
set_name_sync(struct hci_dev * hdev,void * data)3820 static int set_name_sync(struct hci_dev *hdev, void *data)
3821 {
3822 if (lmp_bredr_capable(hdev)) {
3823 hci_update_name_sync(hdev);
3824 hci_update_eir_sync(hdev);
3825 }
3826
3827 /* The name is stored in the scan response data and so
3828 * no need to update the advertising data here.
3829 */
3830 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3831 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3832
3833 return 0;
3834 }
3835
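/* While the controller is powered off, the new name only needs to be
 * stored in hdev and announced via the Local Name Changed event; no
 * HCI traffic is required until the next power on.
 */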
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones, just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}

static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}

static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}

int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}

static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}

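/* Translate the MGMT PHY selection into an HCI LE Set Default PHY
 * command. In all_phys, bit 0 means "no TX preference" and bit 1
 * means "no RX preference", so each bit is set whenever no PHY from
 * the corresponding direction mask was selected.
 */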
static int set_default_phy_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
	struct hci_cp_le_set_default_phy cp_phy;
	u32 selected_phys = __le32_to_cpu(cp->selected_phys);

	memset(&cp_phy, 0, sizeof(cp_phy));

	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				  sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);

	return 0;
}

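/* Note that the EDR bits in the BR/EDR packet type bitmask have
 * inverted semantics ("shall not be used"), which is why selecting an
 * EDR PHY below clears the matching HCI_2DHx/HCI_3DHx bit rather than
 * setting it.
 */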
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

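/* The length validation below follows the usual pattern for
 * variable-length MGMT commands: struct_size(keys, keys, key_count)
 * evaluates to sizeof(*keys) + key_count * sizeof(keys->keys[0]) with
 * overflow protection, and must match the received length exactly.
 */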
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}

static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

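/* The capability reply is built as a sequence of EIR-style
 * length/type/value entries via eir_append_data() and
 * eir_append_le16(); cap_len tracks the running total, so the final
 * reply size is sizeof(*rp) + cap_len.
 */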
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported,
	 * remote public key validation is supported as well.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, the
	 * maximum encryption key size is provided as well.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to
	 * fetch them from the controller.
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}

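/* The experimental feature UUIDs below are stored in little-endian
 * byte order, i.e. each array is the byte-reversed form of the
 * canonical UUID string given in the comment above it.
 */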
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};

static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update the client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}

static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	/* NOTE: it is unclear whether updating conn_flags here needs to
	 * be atomic with respect to its readers.
	 */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}

static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}

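/* Experimental features are dispatched through the exp_features[]
 * table further below: each entry pairs a 128-bit UUID with its
 * set_func() handler, and the table ends with a NULL entry so it can
 * be walked without a separate length.
 */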
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}

/* The zero key UUID is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}

#ifdef CONFIG_BT_FEATURE_DEBUG
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* This command requires the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif

static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* This command requires a controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}

static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* This command requires a controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when the controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}

static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* This command requires a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}

static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* This command requires a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}

static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* This command requires a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}

#ifdef CONFIG_BT_LE
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* This command requires the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif

static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};

static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	size_t i = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (i = 0; exp_features[i].uuid; i++) {
		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
			return exp_features[i].set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}

static u32 get_params_flags(struct hci_dev *hdev,
			    struct hci_conn_params *params)
{
	u32 flags = hdev->conn_flags;

	/* Devices using RPAs can only be programmed into the accept list
	 * if LL Privacy has been enabled; otherwise they cannot use
	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
	 */
	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;

	return flags;
}

static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}

static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}

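/* params->flags is updated with WRITE_ONCE() below since it may be
 * read locklessly; readers are presumably expected to pair this with
 * READ_ONCE() or to hold hci_dev_lock.
 */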
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE: conn_flags can change under us here; arguably
	 * hci_dev_lock() should be taken before reading it.
	 */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}

static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev;

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}

static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
				     __le16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;

	ev.monitor_handle = handle;

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}

static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	return hci_add_adv_monitor(hdev, monitor);
}

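/* Takes ownership of @m: on any failure the monitor is released via
 * hci_free_adv_monitor() before the error status is returned, so the
 * caller must not touch it afterwards.
 */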
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}

static void parse_adv_monitor_rssi(struct adv_monitor *m,
				   struct mgmt_adv_rssi_thresholds *rssi)
{
	if (rssi) {
		m->rssi.low_threshold = rssi->low_threshold;
		m->rssi.low_threshold_timeout =
		    __le16_to_cpu(rssi->low_threshold_timeout);
		m->rssi.high_threshold = rssi->high_threshold;
		m->rssi.high_threshold_timeout =
		    __le16_to_cpu(rssi->high_threshold_timeout);
		m->rssi.sampling_period = rssi->sampling_period;
	} else {
		/* Default values. These numbers are the least constricting
		 * parameters for the MSFT API to work, so it behaves as if
		 * there are no RSSI parameters to consider. They may need
		 * to be changed if other APIs are to be supported.
		 */
		m->rssi.low_threshold = -127;
		m->rssi.low_threshold_timeout = 60;
		m->rssi.high_threshold = -127;
		m->rssi.high_threshold_timeout = 0;
		m->rssi.sampling_period = 0;
	}
}

static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	u8 offset = 0, length = 0;
	struct adv_pattern *p = NULL;
	int i;

	for (i = 0; i < pattern_count; i++) {
		offset = patterns[i].offset;
		length = patterns[i].length;
		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
		    length > HCI_MAX_EXT_AD_LENGTH ||
		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = patterns[i].ad_type;
		p->offset = patterns[i].offset;
		p->length = patterns[i].length;
		memcpy(p->value, patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}

static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}

static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}

static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp;

	if (status == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	cp = cmd->param;

	rp.monitor_handle = cp->monitor_handle;

	if (!status) {
		mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

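/* A monitor_handle of 0 acts as a wildcard and removes all registered
 * monitors; any other value removes just the matching monitor.
 */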
static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
	u16 handle = __le16_to_cpu(cp->monitor_handle);

	if (!handle)
		return hci_remove_all_adv_monitor(hdev);

	return hci_remove_single_adv_monitor(hdev, handle);
}

static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		mgmt_pending_free(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}

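/* When BR/EDR Secure Connections is not enabled, only the P-192
 * hash/rand pair is valid, so rp_size is shrunk below to drop the
 * P-256 fields from the reply.
 */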
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}

static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;

	if (bredr_sc_enabled(hdev))
		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
	else
		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);

	if (IS_ERR(cmd->skb))
		return PTR_ERR(cmd->skb);
	else
		return 0;
}

static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

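/* Add Remote OOB Data comes in two sizes: the legacy form carries only
 * the P-192 hash and randomizer, while the extended form carries both
 * the P-192 and P-256 values. The two are told apart purely by the
 * command length.
 */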
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

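/* Map a discovery type to the transports it needs: LE-only discovery
 * requires LE support, BR/EDR-only requires BR/EDR support, and
 * interleaved discovery requires both (hence the fallthrough below).
 */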
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
				    uint8_t *mgmt_status)
{
	switch (type) {
	case DISCOV_TYPE_LE:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	case DISCOV_TYPE_INTERLEAVED:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		fallthrough;
	case DISCOV_TYPE_BREDR:
		*mgmt_status = mgmt_bredr_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	default:
		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}

static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
					    DISCOVERY_FINDING);
}

static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}

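/* Shared implementation behind Start Discovery and Start Limited
 * Discovery. The opcode is passed in so that replies and pending
 * command tracking match the command the caller actually issued, and
 * so limited discovery can be flagged in hdev->discovery.limited.
 */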
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}

static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}

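/* Bound the UUID list so the total command length still fits in the
 * u16 length field: with 16 bytes per UUID, max_uuid_count is
 * (U16_MAX - sizeof(*cp)) / 16 and the expected length check below is
 * sizeof(*cp) + uuid_count * 16 (e.g. two UUIDs add 32 bytes).
 */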
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}

static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_FAILED;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

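/* Set Device ID: a source of 0x0000 disables the Device ID, 0x0001
 * means a Bluetooth SIG assigned Vendor ID and 0x0002 a USB
 * Implementer's Forum assigned one, which is why anything above
 * 0x0002 is rejected. The EIR data is regenerated afterwards via
 * set_device_id_sync().
 */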
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}

static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}

static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
	else
		bt_dev_dbg(hdev, "status %d", err);
}

static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true,
				     cmd_status_rsp, &status);
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}

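/* Set Advertising helper: val 0x00 disables advertising, 0x01 enables
 * it and 0x02 enables connectable advertising, tracked via the
 * HCI_ADVERTISING_CONNECTABLE flag.
 */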
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}

static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

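/* Scan interval and window are in units of 0.625 ms, so the accepted
 * 0x0004-0x4000 range below corresponds to 2.5 ms - 10.24 s, and the
 * scan window may never exceed the scan interval.
 */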
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Keep allowed ranges in sync with set_mesh() */
	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}

static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}

static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}

static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}

static int set_bredr_sync(struct hci_dev *hdev, void *data)
{
	int status;

	status = hci_write_fast_connectable_sync(hdev, false);

	if (!status)
		status = hci_update_scan_sync(hdev);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	if (!status)
		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);

	return status;
}

static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}

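/* Secure Connections setting: 0x00 disables SC, 0x01 enables it and
 * 0x02 enables SC Only mode, mirrored in the HCI_SC_ENABLED and
 * HCI_SC_ONLY flags once the controller write succeeds.
 */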
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}

static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

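/* Set Debug Keys: 0x01 keeps debug keys around (HCI_KEEP_DEBUG_KEYS)
 * and 0x02 additionally puts the controller into SSP debug mode so it
 * generates debug keys itself (HCI_USE_DEBUG_KEYS).
 */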
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;

		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

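/* Set Privacy: 0x01 enables privacy with the supplied IRK and 0x02
 * additionally selects limited privacy (HCI_LIMITED_PRIVACY); on
 * disable the stored IRK is wiped again.
 */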
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

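/* IRKs are only accepted for public LE addresses or static random
 * addresses. The static-random rule requires the two most significant
 * bits set, i.e. a most significant byte in the 0xC0-0xFF range.
 */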
static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

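/* LTK validation: besides the same address rules as for IRKs, the
 * initiator field must be a strict boolean (0x00 or 0x01).
 */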
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->initiator != 0x00 && key->initiator != 0x01)
		return false;

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (!ltk_is_valid(key)) {
			bt_dev_warn(hdev, "Invalid LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16 handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}

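/* Get Connection Information: the RSSI and TX power values are cached
 * in the hci_conn and only re-read from the controller once the cache
 * age, randomized between conn_info_min_age and conn_info_max_age,
 * has expired. The randomization keeps clients from guessing when a
 * fresh poll will hit the controller.
 */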
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}

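/* HCI_Read_Clock is issued twice: once right after the memset, where
 * "which" is left at 0x00 to read the local clock, and once with
 * "which" set to 0x01 to read the piconet clock of the connection.
 */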
get_clock_info_sync(struct hci_dev * hdev,void * data)7443 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7444 {
7445 struct mgmt_pending_cmd *cmd = data;
7446 struct mgmt_cp_get_clock_info *cp = cmd->param;
7447 struct hci_cp_read_clock hci_cp;
7448 struct hci_conn *conn;
7449
7450 memset(&hci_cp, 0, sizeof(hci_cp));
7451 hci_read_clock_sync(hdev, &hci_cp);
7452
7453 /* Make sure connection still exists */
7454 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7455 if (!conn || conn->state != BT_CONNECTED)
7456 return MGMT_STATUS_NOT_CONNECTED;
7457
7458 cmd->user_data = conn;
7459 hci_cp.handle = cpu_to_le16(conn->handle);
7460 hci_cp.which = 0x01; /* Piconet clock */
7461
7462 return hci_read_clock_sync(hdev, &hci_cp);
7463 }
7464
get_clock_info(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)7465 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7466 u16 len)
7467 {
7468 struct mgmt_cp_get_clock_info *cp = data;
7469 struct mgmt_rp_get_clock_info rp;
7470 struct mgmt_pending_cmd *cmd;
7471 struct hci_conn *conn;
7472 int err;
7473
7474 bt_dev_dbg(hdev, "sock %p", sk);
7475
7476 memset(&rp, 0, sizeof(rp));
7477 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7478 rp.addr.type = cp->addr.type;
7479
7480 if (cp->addr.type != BDADDR_BREDR)
7481 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7482 MGMT_STATUS_INVALID_PARAMS,
7483 &rp, sizeof(rp));
7484
7485 hci_dev_lock(hdev);
7486
7487 if (!hdev_is_powered(hdev)) {
7488 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7489 MGMT_STATUS_NOT_POWERED, &rp,
7490 sizeof(rp));
7491 goto unlock;
7492 }
7493
7494 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7495 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7496 &cp->addr.bdaddr);
7497 if (!conn || conn->state != BT_CONNECTED) {
7498 err = mgmt_cmd_complete(sk, hdev->id,
7499 MGMT_OP_GET_CLOCK_INFO,
7500 MGMT_STATUS_NOT_CONNECTED,
7501 &rp, sizeof(rp));
7502 goto unlock;
7503 }
7504 } else {
7505 conn = NULL;
7506 }
7507
7508 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7509 if (!cmd)
7510 err = -ENOMEM;
7511 else
7512 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7513 get_clock_info_complete);
7514
7515 if (err < 0) {
7516 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7517 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7518
7519 if (cmd)
7520 mgmt_pending_free(cmd);
7521 }
7522
7523
7524 unlock:
7525 hci_dev_unlock(hdev);
7526 return err;
7527 }
7528
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)7529 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7530 {
7531 struct hci_conn *conn;
7532
7533 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7534 if (!conn)
7535 return false;
7536
7537 if (conn->dst_type != type)
7538 return false;
7539
7540 if (conn->state != BT_CONNECTED)
7541 return false;
7542
7543 return true;
7544 }
7545
7546 /* This function requires the caller holds hdev->lock */
7547 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7548 u8 addr_type, u8 auto_connect)
7549 {
7550 struct hci_conn_params *params;
7551
7552 params = hci_conn_params_add(hdev, addr, addr_type);
7553 if (!params)
7554 return -EIO;
7555
7556 if (params->auto_connect == auto_connect)
7557 return 0;
7558
7559 hci_pend_le_list_del_init(params);
7560
7561 switch (auto_connect) {
7562 case HCI_AUTO_CONN_DISABLED:
7563 case HCI_AUTO_CONN_LINK_LOSS:
7564 /* If auto connect is being disabled while we are trying to
7565 * connect to this device, keep connecting.
7566 */
7567 if (params->explicit_connect)
7568 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7569 break;
7570 case HCI_AUTO_CONN_REPORT:
7571 if (params->explicit_connect)
7572 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7573 else
7574 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7575 break;
7576 case HCI_AUTO_CONN_DIRECT:
7577 case HCI_AUTO_CONN_ALWAYS:
7578 if (!is_connected(hdev, addr, addr_type))
7579 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7580 break;
7581 }
7582
7583 params->auto_connect = auto_connect;
7584
7585 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7586 addr, addr_type, auto_connect);
7587
7588 return 0;
7589 }
7590
7591 static void device_added(struct sock *sk, struct hci_dev *hdev,
7592 bdaddr_t *bdaddr, u8 type, u8 action)
7593 {
7594 struct mgmt_ev_device_added ev;
7595
7596 bacpy(&ev.addr.bdaddr, bdaddr);
7597 ev.addr.type = type;
7598 ev.action = action;
7599
7600 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7601 }
7602
7603 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7604 {
7605 struct mgmt_pending_cmd *cmd = data;
7606 struct mgmt_cp_add_device *cp = cmd->param;
7607
7608 if (!err) {
7609 struct hci_conn_params *params;
7610
7611 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7612 le_addr_type(cp->addr.type));
7613
7614 device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7615 cp->action);
7616 device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7617 cp->addr.type, hdev->conn_flags,
7618 params ? params->flags : 0);
7619 }
7620
7621 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7622 mgmt_status(err), &cp->addr, sizeof(cp->addr));
7623 mgmt_pending_free(cmd);
7624 }
7625
7626 static int add_device_sync(struct hci_dev *hdev, void *data)
7627 {
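/* The accept list and connection parameters were already updated in
 * add_device() under hdev->lock; all that is left here is to bring the
 * LE passive scan state back in sync with them.
 */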
7628 return hci_update_passive_scan_sync(hdev);
7629 }
7630
7631 static int add_device(struct sock *sk, struct hci_dev *hdev,
7632 void *data, u16 len)
7633 {
7634 struct mgmt_pending_cmd *cmd;
7635 struct mgmt_cp_add_device *cp = data;
7636 u8 auto_conn, addr_type;
7637 struct hci_conn_params *params;
7638 int err;
7639 u32 current_flags = 0;
7640 u32 supported_flags;
7641
7642 bt_dev_dbg(hdev, "sock %p", sk);
7643
7644 if (!bdaddr_type_is_valid(cp->addr.type) ||
7645 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7646 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7647 MGMT_STATUS_INVALID_PARAMS,
7648 &cp->addr, sizeof(cp->addr));
7649
7650 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7651 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7652 MGMT_STATUS_INVALID_PARAMS,
7653 &cp->addr, sizeof(cp->addr));
7654
7655 hci_dev_lock(hdev);
7656
7657 if (cp->addr.type == BDADDR_BREDR) {
7658 /* Only the incoming connections action (0x01) is supported for now */
7659 if (cp->action != 0x01) {
7660 err = mgmt_cmd_complete(sk, hdev->id,
7661 MGMT_OP_ADD_DEVICE,
7662 MGMT_STATUS_INVALID_PARAMS,
7663 &cp->addr, sizeof(cp->addr));
7664 goto unlock;
7665 }
7666
7667 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7668 &cp->addr.bdaddr,
7669 cp->addr.type, 0);
7670 if (err)
7671 goto unlock;
7672
7673 hci_update_scan(hdev);
7674
7675 goto added;
7676 }
7677
7678 addr_type = le_addr_type(cp->addr.type);
7679
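/* Map the Add Device action to the internal auto-connect policy:
 * 0x02 auto-connects whenever the device is seen, 0x01 connects only
 * on directed advertising from the device (allow incoming connection),
 * and 0x00 merely reports the device when it is found while scanning.
 */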
7680 if (cp->action == 0x02)
7681 auto_conn = HCI_AUTO_CONN_ALWAYS;
7682 else if (cp->action == 0x01)
7683 auto_conn = HCI_AUTO_CONN_DIRECT;
7684 else
7685 auto_conn = HCI_AUTO_CONN_REPORT;
7686
7687 /* The kernel internally uses conn_params with resolvable private
7688 * addresses, but Add Device allows only identity addresses.
7689 * Make sure this is enforced before calling
7690 * hci_conn_params_lookup.
7691 */
7692 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7693 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7694 MGMT_STATUS_INVALID_PARAMS,
7695 &cp->addr, sizeof(cp->addr));
7696 goto unlock;
7697 }
7698
7699 /* If the connection parameters don't exist for this device,
7700 * they will be created and configured with defaults.
7701 */
7702 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7703 auto_conn) < 0) {
7704 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7705 MGMT_STATUS_FAILED, &cp->addr,
7706 sizeof(cp->addr));
7707 goto unlock;
7708 } else {
7709 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7710 addr_type);
7711 if (params)
7712 current_flags = params->flags;
7713 }
7714
7715 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7716 if (!cmd) {
7717 err = -ENOMEM;
7718 goto unlock;
7719 }
7720
7721 err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
7722 add_device_complete);
7723 if (err < 0) {
7724 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7725 MGMT_STATUS_FAILED, &cp->addr,
7726 sizeof(cp->addr));
7727 mgmt_pending_free(cmd);
7728 }
7729
7730 goto unlock;
7731
7732 added:
7733 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7734 supported_flags = hdev->conn_flags;
7735 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7736 supported_flags, current_flags);
7737
7738 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7739 MGMT_STATUS_SUCCESS, &cp->addr,
7740 sizeof(cp->addr));
7741
7742 unlock:
7743 hci_dev_unlock(hdev);
7744 return err;
7745 }
7746
7747 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7748 bdaddr_t *bdaddr, u8 type)
7749 {
7750 struct mgmt_ev_device_removed ev;
7751
7752 bacpy(&ev.addr.bdaddr, bdaddr);
7753 ev.addr.type = type;
7754
7755 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7756 }
7757
7758 static int remove_device_sync(struct hci_dev *hdev, void *data)
7759 {
7760 return hci_update_passive_scan_sync(hdev);
7761 }
7762
7763 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7764 void *data, u16 len)
7765 {
7766 struct mgmt_cp_remove_device *cp = data;
7767 int err;
7768
7769 bt_dev_dbg(hdev, "sock %p", sk);
7770
7771 hci_dev_lock(hdev);
7772
7773 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7774 struct hci_conn_params *params;
7775 u8 addr_type;
7776
7777 if (!bdaddr_type_is_valid(cp->addr.type)) {
7778 err = mgmt_cmd_complete(sk, hdev->id,
7779 MGMT_OP_REMOVE_DEVICE,
7780 MGMT_STATUS_INVALID_PARAMS,
7781 &cp->addr, sizeof(cp->addr));
7782 goto unlock;
7783 }
7784
7785 if (cp->addr.type == BDADDR_BREDR) {
7786 err = hci_bdaddr_list_del(&hdev->accept_list,
7787 &cp->addr.bdaddr,
7788 cp->addr.type);
7789 if (err) {
7790 err = mgmt_cmd_complete(sk, hdev->id,
7791 MGMT_OP_REMOVE_DEVICE,
7792 MGMT_STATUS_INVALID_PARAMS,
7793 &cp->addr,
7794 sizeof(cp->addr));
7795 goto unlock;
7796 }
7797
7798 hci_update_scan(hdev);
7799
7800 device_removed(sk, hdev, &cp->addr.bdaddr,
7801 cp->addr.type);
7802 goto complete;
7803 }
7804
7805 addr_type = le_addr_type(cp->addr.type);
7806
7807 /* The kernel internally uses conn_params with resolvable private
7808 * addresses, but Remove Device allows only identity addresses.
7809 * Make sure this is enforced before calling
7810 * hci_conn_params_lookup.
7811 */
7812 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7813 err = mgmt_cmd_complete(sk, hdev->id,
7814 MGMT_OP_REMOVE_DEVICE,
7815 MGMT_STATUS_INVALID_PARAMS,
7816 &cp->addr, sizeof(cp->addr));
7817 goto unlock;
7818 }
7819
7820 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7821 addr_type);
7822 if (!params) {
7823 err = mgmt_cmd_complete(sk, hdev->id,
7824 MGMT_OP_REMOVE_DEVICE,
7825 MGMT_STATUS_INVALID_PARAMS,
7826 &cp->addr, sizeof(cp->addr));
7827 goto unlock;
7828 }
7829
7830 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7831 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7832 err = mgmt_cmd_complete(sk, hdev->id,
7833 MGMT_OP_REMOVE_DEVICE,
7834 MGMT_STATUS_INVALID_PARAMS,
7835 &cp->addr, sizeof(cp->addr));
7836 goto unlock;
7837 }
7838
7839 hci_conn_params_free(params);
7840
7841 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7842 } else {
7843 struct hci_conn_params *p, *tmp;
7844 struct bdaddr_list *b, *btmp;
7845
7846 if (cp->addr.type) {
7847 err = mgmt_cmd_complete(sk, hdev->id,
7848 MGMT_OP_REMOVE_DEVICE,
7849 MGMT_STATUS_INVALID_PARAMS,
7850 &cp->addr, sizeof(cp->addr));
7851 goto unlock;
7852 }
7853
7854 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7855 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7856 list_del(&b->list);
7857 kfree(b);
7858 }
7859
7860 hci_update_scan(hdev);
7861
7862 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7863 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7864 continue;
7865 device_removed(sk, hdev, &p->addr, p->addr_type);
7866 if (p->explicit_connect) {
7867 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7868 continue;
7869 }
7870 hci_conn_params_free(p);
7871 }
7872
7873 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7874 }
7875
7876 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7877
7878 complete:
7879 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7880 MGMT_STATUS_SUCCESS, &cp->addr,
7881 sizeof(cp->addr));
7882 unlock:
7883 hci_dev_unlock(hdev);
7884 return err;
7885 }
7886
7887 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7888 u16 len)
7889 {
7890 struct mgmt_cp_load_conn_param *cp = data;
7891 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7892 sizeof(struct mgmt_conn_param));
7893 u16 param_count, expected_len;
7894 int i;
7895
7896 if (!lmp_le_capable(hdev))
7897 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7898 MGMT_STATUS_NOT_SUPPORTED);
7899
7900 param_count = __le16_to_cpu(cp->param_count);
7901 if (param_count > max_param_count) {
7902 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7903 param_count);
7904 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7905 MGMT_STATUS_INVALID_PARAMS);
7906 }
7907
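/* The payload must be exactly the fixed header followed by param_count
 * parameter entries; struct_size() computes that total size with
 * overflow checking.
 */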
7908 expected_len = struct_size(cp, params, param_count);
7909 if (expected_len != len) {
7910 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7911 expected_len, len);
7912 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7913 MGMT_STATUS_INVALID_PARAMS);
7914 }
7915
7916 bt_dev_dbg(hdev, "param_count %u", param_count);
7917
7918 hci_dev_lock(hdev);
7919
7920 hci_conn_params_clear_disabled(hdev);
7921
7922 for (i = 0; i < param_count; i++) {
7923 struct mgmt_conn_param *param = &cp->params[i];
7924 struct hci_conn_params *hci_param;
7925 u16 min, max, latency, timeout;
7926 u8 addr_type;
7927
7928 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
7929 param->addr.type);
7930
7931 if (param->addr.type == BDADDR_LE_PUBLIC) {
7932 addr_type = ADDR_LE_DEV_PUBLIC;
7933 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7934 addr_type = ADDR_LE_DEV_RANDOM;
7935 } else {
7936 bt_dev_err(hdev, "ignoring invalid connection parameters");
7937 continue;
7938 }
7939
7940 min = le16_to_cpu(param->min_interval);
7941 max = le16_to_cpu(param->max_interval);
7942 latency = le16_to_cpu(param->latency);
7943 timeout = le16_to_cpu(param->timeout);
7944
7945 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7946 min, max, latency, timeout);
7947
7948 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7949 bt_dev_err(hdev, "ignoring invalid connection parameters");
7950 continue;
7951 }
7952
7953 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7954 addr_type);
7955 if (!hci_param) {
7956 bt_dev_err(hdev, "failed to add connection parameters");
7957 continue;
7958 }
7959
7960 hci_param->conn_min_interval = min;
7961 hci_param->conn_max_interval = max;
7962 hci_param->conn_latency = latency;
7963 hci_param->supervision_timeout = timeout;
7964 }
7965
7966 hci_dev_unlock(hdev);
7967
7968 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7969 NULL, 0);
7970 }
7971
7972 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7973 void *data, u16 len)
7974 {
7975 struct mgmt_cp_set_external_config *cp = data;
7976 bool changed;
7977 int err;
7978
7979 bt_dev_dbg(hdev, "sock %p", sk);
7980
7981 if (hdev_is_powered(hdev))
7982 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7983 MGMT_STATUS_REJECTED);
7984
7985 if (cp->config != 0x00 && cp->config != 0x01)
7986 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7987 MGMT_STATUS_INVALID_PARAMS);
7988
7989 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7990 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7991 MGMT_STATUS_NOT_SUPPORTED);
7992
7993 hci_dev_lock(hdev);
7994
7995 if (cp->config)
7996 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7997 else
7998 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7999
8000 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8001 if (err < 0)
8002 goto unlock;
8003
8004 if (!changed)
8005 goto unlock;
8006
8007 err = new_options(hdev, sk);
8008
8009 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8010 mgmt_index_removed(hdev);
8011
8012 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8013 hci_dev_set_flag(hdev, HCI_CONFIG);
8014 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8015
8016 queue_work(hdev->req_workqueue, &hdev->power_on);
8017 } else {
8018 set_bit(HCI_RAW, &hdev->flags);
8019 mgmt_index_added(hdev);
8020 }
8021 }
8022
8023 unlock:
8024 hci_dev_unlock(hdev);
8025 return err;
8026 }
8027
8028 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8029 void *data, u16 len)
8030 {
8031 struct mgmt_cp_set_public_address *cp = data;
8032 bool changed;
8033 int err;
8034
8035 bt_dev_dbg(hdev, "sock %p", sk);
8036
8037 if (hdev_is_powered(hdev))
8038 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8039 MGMT_STATUS_REJECTED);
8040
8041 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8042 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8043 MGMT_STATUS_INVALID_PARAMS);
8044
8045 if (!hdev->set_bdaddr)
8046 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8047 MGMT_STATUS_NOT_SUPPORTED);
8048
8049 hci_dev_lock(hdev);
8050
8051 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8052 bacpy(&hdev->public_addr, &cp->bdaddr);
8053
8054 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8055 if (err < 0)
8056 goto unlock;
8057
8058 if (!changed)
8059 goto unlock;
8060
8061 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8062 err = new_options(hdev, sk);
8063
8064 if (is_configured(hdev)) {
8065 mgmt_index_removed(hdev);
8066
8067 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8068
8069 hci_dev_set_flag(hdev, HCI_CONFIG);
8070 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8071
8072 queue_work(hdev->req_workqueue, &hdev->power_on);
8073 }
8074
8075 unlock:
8076 hci_dev_unlock(hdev);
8077 return err;
8078 }
8079
8080 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8081 int err)
8082 {
8083 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8084 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8085 u8 *h192, *r192, *h256, *r256;
8086 struct mgmt_pending_cmd *cmd = data;
8087 struct sk_buff *skb = cmd->skb;
8088 u8 status = mgmt_status(err);
8089 u16 eir_len;
8090
8091 if (err == -ECANCELED ||
8092 cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8093 return;
8094
8095 if (!status) {
8096 if (!skb)
8097 status = MGMT_STATUS_FAILED;
8098 else if (IS_ERR(skb))
8099 status = mgmt_status(PTR_ERR(skb));
8100 else
8101 status = mgmt_status(skb->data[0]);
8102 }
8103
8104 bt_dev_dbg(hdev, "status %u", status);
8105
8106 mgmt_cp = cmd->param;
8107
8108 if (status) {
8109 status = mgmt_status(status);
8110 eir_len = 0;
8111
8112 h192 = NULL;
8113 r192 = NULL;
8114 h256 = NULL;
8115 r256 = NULL;
8116 } else if (!bredr_sc_enabled(hdev)) {
8117 struct hci_rp_read_local_oob_data *rp;
8118
8119 if (skb->len != sizeof(*rp)) {
8120 status = MGMT_STATUS_FAILED;
8121 eir_len = 0;
8122 } else {
8123 status = MGMT_STATUS_SUCCESS;
8124 rp = (void *)skb->data;
8125
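/* 5 bytes for the Class of Device field (length, type and three
 * data bytes) plus 18 bytes each for the 16-byte C192 hash and
 * R192 randomizer fields (length, type and 16 data bytes).
 */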
8126 eir_len = 5 + 18 + 18;
8127 h192 = rp->hash;
8128 r192 = rp->rand;
8129 h256 = NULL;
8130 r256 = NULL;
8131 }
8132 } else {
8133 struct hci_rp_read_local_oob_ext_data *rp;
8134
8135 if (skb->len != sizeof(*rp)) {
8136 status = MGMT_STATUS_FAILED;
8137 eir_len = 0;
8138 } else {
8139 status = MGMT_STATUS_SUCCESS;
8140 rp = (void *)skb->data;
8141
8142 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8143 eir_len = 5 + 18 + 18;
8144 h192 = NULL;
8145 r192 = NULL;
8146 } else {
8147 eir_len = 5 + 18 + 18 + 18 + 18;
8148 h192 = rp->hash192;
8149 r192 = rp->rand192;
8150 }
8151
8152 h256 = rp->hash256;
8153 r256 = rp->rand256;
8154 }
8155 }
8156
8157 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8158 if (!mgmt_rp)
8159 goto done;
8160
8161 if (eir_len == 0)
8162 goto send_rsp;
8163
8164 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8165 hdev->dev_class, 3);
8166
8167 if (h192 && r192) {
8168 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8169 EIR_SSP_HASH_C192, h192, 16);
8170 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8171 EIR_SSP_RAND_R192, r192, 16);
8172 }
8173
8174 if (h256 && r256) {
8175 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8176 EIR_SSP_HASH_C256, h256, 16);
8177 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8178 EIR_SSP_RAND_R256, r256, 16);
8179 }
8180
8181 send_rsp:
8182 mgmt_rp->type = mgmt_cp->type;
8183 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8184
8185 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8186 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8187 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8188 if (err < 0 || status)
8189 goto done;
8190
8191 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8192
8193 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8194 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8195 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8196 done:
8197 if (skb && !IS_ERR(skb))
8198 kfree_skb(skb);
8199
8200 kfree(mgmt_rp);
8201 mgmt_pending_remove(cmd);
8202 }
8203
8204 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8205 struct mgmt_cp_read_local_oob_ext_data *cp)
8206 {
8207 struct mgmt_pending_cmd *cmd;
8208 int err;
8209
8210 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8211 cp, sizeof(*cp));
8212 if (!cmd)
8213 return -ENOMEM;
8214
8215 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8216 read_local_oob_ext_data_complete);
8217
8218 if (err < 0) {
8219 mgmt_pending_remove(cmd);
8220 return err;
8221 }
8222
8223 return 0;
8224 }
8225
8226 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8227 void *data, u16 data_len)
8228 {
8229 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8230 struct mgmt_rp_read_local_oob_ext_data *rp;
8231 size_t rp_len;
8232 u16 eir_len;
8233 u8 status, flags, role, addr[7], hash[16], rand[16];
8234 int err;
8235
8236 bt_dev_dbg(hdev, "sock %p", sk);
8237
8238 if (hdev_is_powered(hdev)) {
8239 switch (cp->type) {
8240 case BIT(BDADDR_BREDR):
8241 status = mgmt_bredr_support(hdev);
8242 if (status)
8243 eir_len = 0;
8244 else
8245 eir_len = 5;
8246 break;
8247 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8248 status = mgmt_le_support(hdev);
8249 if (status)
8250 eir_len = 0;
8251 else
8252 eir_len = 9 + 3 + 18 + 18 + 3;
8253 break;
8254 default:
8255 status = MGMT_STATUS_INVALID_PARAMS;
8256 eir_len = 0;
8257 break;
8258 }
8259 } else {
8260 status = MGMT_STATUS_NOT_POWERED;
8261 eir_len = 0;
8262 }
8263
8264 rp_len = sizeof(*rp) + eir_len;
8265 rp = kmalloc(rp_len, GFP_ATOMIC);
8266 if (!rp)
8267 return -ENOMEM;
8268
8269 if (!status && !lmp_ssp_capable(hdev)) {
8270 status = MGMT_STATUS_NOT_SUPPORTED;
8271 eir_len = 0;
8272 }
8273
8274 if (status)
8275 goto complete;
8276
8277 hci_dev_lock(hdev);
8278
8279 eir_len = 0;
8280 switch (cp->type) {
8281 case BIT(BDADDR_BREDR):
8282 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8283 err = read_local_ssp_oob_req(hdev, sk, cp);
8284 hci_dev_unlock(hdev);
8285 if (!err)
8286 goto done;
8287
8288 status = MGMT_STATUS_FAILED;
8289 goto complete;
8290 } else {
8291 eir_len = eir_append_data(rp->eir, eir_len,
8292 EIR_CLASS_OF_DEV,
8293 hdev->dev_class, 3);
8294 }
8295 break;
8296 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8297 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8298 smp_generate_oob(hdev, hash, rand) < 0) {
8299 hci_dev_unlock(hdev);
8300 status = MGMT_STATUS_FAILED;
8301 goto complete;
8302 }
8303
8304 /* This should return the active RPA, but since the RPA
8305 * is only programmed on demand, it is really hard to fill
8306 * this in at the moment. For now disallow retrieving
8307 * local out-of-band data when privacy is in use.
8308 *
8309 * Returning the identity address will not help here since
8310 * pairing happens before the identity resolving key is
8311 * known and thus the connection establishment happens
8312 * based on the RPA and not the identity address.
8313 */
8314 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8315 hci_dev_unlock(hdev);
8316 status = MGMT_STATUS_REJECTED;
8317 goto complete;
8318 }
8319
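/* The LE Bluetooth Device Address field carries the 6-byte address
 * followed by one type byte: 0x01 when the static random address is
 * used, 0x00 for the public address.
 */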
8320 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8321 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8322 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8323 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8324 memcpy(addr, &hdev->static_addr, 6);
8325 addr[6] = 0x01;
8326 } else {
8327 memcpy(addr, &hdev->bdaddr, 6);
8328 addr[6] = 0x00;
8329 }
8330
8331 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8332 addr, sizeof(addr));
8333
8334 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8335 role = 0x02;
8336 else
8337 role = 0x01;
8338
8339 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8340 &role, sizeof(role));
8341
8342 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8343 eir_len = eir_append_data(rp->eir, eir_len,
8344 EIR_LE_SC_CONFIRM,
8345 hash, sizeof(hash));
8346
8347 eir_len = eir_append_data(rp->eir, eir_len,
8348 EIR_LE_SC_RANDOM,
8349 rand, sizeof(rand));
8350 }
8351
8352 flags = mgmt_get_adv_discov_flags(hdev);
8353
8354 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8355 flags |= LE_AD_NO_BREDR;
8356
8357 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8358 &flags, sizeof(flags));
8359 break;
8360 }
8361
8362 hci_dev_unlock(hdev);
8363
8364 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8365
8366 status = MGMT_STATUS_SUCCESS;
8367
8368 complete:
8369 rp->type = cp->type;
8370 rp->eir_len = cpu_to_le16(eir_len);
8371
8372 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8373 status, rp, sizeof(*rp) + eir_len);
8374 if (err < 0 || status)
8375 goto done;
8376
8377 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8378 rp, sizeof(*rp) + eir_len,
8379 HCI_MGMT_OOB_DATA_EVENTS, sk);
8380
8381 done:
8382 kfree(rp);
8383
8384 return err;
8385 }
8386
8387 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8388 {
8389 u32 flags = 0;
8390
8391 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8392 flags |= MGMT_ADV_FLAG_DISCOV;
8393 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8394 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8395 flags |= MGMT_ADV_FLAG_APPEARANCE;
8396 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8397 flags |= MGMT_ADV_PARAM_DURATION;
8398 flags |= MGMT_ADV_PARAM_TIMEOUT;
8399 flags |= MGMT_ADV_PARAM_INTERVALS;
8400 flags |= MGMT_ADV_PARAM_TX_POWER;
8401 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8402
8403 /* In extended advertising, the TX_POWER returned from Set Adv Param
8404 * will always be valid.
8405 */
8406 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8407 flags |= MGMT_ADV_FLAG_TX_POWER;
8408
8409 if (ext_adv_capable(hdev)) {
8410 flags |= MGMT_ADV_FLAG_SEC_1M;
8411 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8412 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8413
8414 if (le_2m_capable(hdev))
8415 flags |= MGMT_ADV_FLAG_SEC_2M;
8416
8417 if (le_coded_capable(hdev))
8418 flags |= MGMT_ADV_FLAG_SEC_CODED;
8419 }
8420
8421 return flags;
8422 }
8423
8424 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8425 void *data, u16 data_len)
8426 {
8427 struct mgmt_rp_read_adv_features *rp;
8428 size_t rp_len;
8429 int err;
8430 struct adv_info *adv_instance;
8431 u32 supported_flags;
8432 u8 *instance;
8433
8434 bt_dev_dbg(hdev, "sock %p", sk);
8435
8436 if (!lmp_le_capable(hdev))
8437 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8438 MGMT_STATUS_REJECTED);
8439
8440 hci_dev_lock(hdev);
8441
8442 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8443 rp = kmalloc(rp_len, GFP_ATOMIC);
8444 if (!rp) {
8445 hci_dev_unlock(hdev);
8446 return -ENOMEM;
8447 }
8448
8449 supported_flags = get_supported_adv_flags(hdev);
8450
8451 rp->supported_flags = cpu_to_le32(supported_flags);
8452 rp->max_adv_data_len = max_adv_len(hdev);
8453 rp->max_scan_rsp_len = max_adv_len(hdev);
8454 rp->max_instances = hdev->le_num_of_adv_sets;
8455 rp->num_instances = hdev->adv_instance_cnt;
8456
8457 instance = rp->instance;
8458 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8459 /* Only instances 1-le_num_of_adv_sets are externally visible */
8460 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8461 *instance = adv_instance->instance;
8462 instance++;
8463 } else {
8464 rp->num_instances--;
8465 rp_len--;
8466 }
8467 }
8468
8469 hci_dev_unlock(hdev);
8470
8471 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8472 MGMT_STATUS_SUCCESS, rp, rp_len);
8473
8474 kfree(rp);
8475
8476 return err;
8477 }
8478
8479 static u8 calculate_name_len(struct hci_dev *hdev)
8480 {
8481 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8482
8483 return eir_append_local_name(hdev, buf, 0);
8484 }
8485
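/* Compute how many bytes of caller-supplied TLV data still fit in the
 * advertising PDU or scan response once the fields generated by the
 * kernel itself are accounted for: the Flags and TX power fields take
 * 3 bytes each (length, type, one data byte), the local name takes its
 * EIR-encoded length and Appearance takes 4 bytes (length, type and a
 * 16-bit value).
 */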
8486 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8487 bool is_adv_data)
8488 {
8489 u8 max_len = max_adv_len(hdev);
8490
8491 if (is_adv_data) {
8492 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8493 MGMT_ADV_FLAG_LIMITED_DISCOV |
8494 MGMT_ADV_FLAG_MANAGED_FLAGS))
8495 max_len -= 3;
8496
8497 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8498 max_len -= 3;
8499 } else {
8500 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8501 max_len -= calculate_name_len(hdev);
8502
8503 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8504 max_len -= 4;
8505 }
8506
8507 return max_len;
8508 }
8509
8510 static bool flags_managed(u32 adv_flags)
8511 {
8512 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8513 MGMT_ADV_FLAG_LIMITED_DISCOV |
8514 MGMT_ADV_FLAG_MANAGED_FLAGS);
8515 }
8516
8517 static bool tx_power_managed(u32 adv_flags)
8518 {
8519 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8520 }
8521
8522 static bool name_managed(u32 adv_flags)
8523 {
8524 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8525 }
8526
8527 static bool appearance_managed(u32 adv_flags)
8528 {
8529 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8530 }
8531
8532 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8533 u8 len, bool is_adv_data)
8534 {
8535 int i, cur_len;
8536 u8 max_len;
8537
8538 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8539
8540 if (len > max_len)
8541 return false;
8542
8543 /* Make sure that the data is correctly formatted. */
8544 for (i = 0; i < len; i += (cur_len + 1)) {
8545 cur_len = data[i];
8546
8547 if (!cur_len)
8548 continue;
8549
8550 if (data[i + 1] == EIR_FLAGS &&
8551 (!is_adv_data || flags_managed(adv_flags)))
8552 return false;
8553
8554 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8555 return false;
8556
8557 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8558 return false;
8559
8560 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8561 return false;
8562
8563 if (data[i + 1] == EIR_APPEARANCE &&
8564 appearance_managed(adv_flags))
8565 return false;
8566
8567 /* If the current field length would exceed the total data
8568 * length, then it's invalid.
8569 */
8570 if (i + cur_len >= len)
8571 return false;
8572 }
8573
8574 return true;
8575 }
8576
8577 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8578 {
8579 u32 supported_flags, phy_flags;
8580
8581 /* The current implementation only supports a subset of the specified
8582 * flags. We also need to check that the sec flags are mutually exclusive.
8583 */
8584 supported_flags = get_supported_adv_flags(hdev);
8585 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
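/* phy_flags & -phy_flags isolates the lowest set bit, so the XOR in
 * the check below is nonzero exactly when more than one secondary-PHY
 * flag was requested.
 */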
8586 if (adv_flags & ~supported_flags ||
8587 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8588 return false;
8589
8590 return true;
8591 }
8592
8593 static bool adv_busy(struct hci_dev *hdev)
8594 {
8595 return pending_find(MGMT_OP_SET_LE, hdev);
8596 }
8597
8598 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8599 int err)
8600 {
8601 struct adv_info *adv, *n;
8602
8603 bt_dev_dbg(hdev, "err %d", err);
8604
8605 hci_dev_lock(hdev);
8606
8607 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8608 u8 instance;
8609
8610 if (!adv->pending)
8611 continue;
8612
8613 if (!err) {
8614 adv->pending = false;
8615 continue;
8616 }
8617
8618 instance = adv->instance;
8619
8620 if (hdev->cur_adv_instance == instance)
8621 cancel_adv_timeout(hdev);
8622
8623 hci_remove_adv_instance(hdev, instance);
8624 mgmt_advertising_removed(sk, hdev, instance);
8625 }
8626
8627 hci_dev_unlock(hdev);
8628 }
8629
8630 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8631 {
8632 struct mgmt_pending_cmd *cmd = data;
8633 struct mgmt_cp_add_advertising *cp = cmd->param;
8634 struct mgmt_rp_add_advertising rp;
8635
8636 memset(&rp, 0, sizeof(rp));
8637
8638 rp.instance = cp->instance;
8639
8640 if (err)
8641 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8642 mgmt_status(err));
8643 else
8644 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8645 mgmt_status(err), &rp, sizeof(rp));
8646
8647 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8648
8649 mgmt_pending_free(cmd);
8650 }
8651
8652 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8653 {
8654 struct mgmt_pending_cmd *cmd = data;
8655 struct mgmt_cp_add_advertising *cp = cmd->param;
8656
8657 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8658 }
8659
8660 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8661 void *data, u16 data_len)
8662 {
8663 struct mgmt_cp_add_advertising *cp = data;
8664 struct mgmt_rp_add_advertising rp;
8665 u32 flags;
8666 u8 status;
8667 u16 timeout, duration;
8668 unsigned int prev_instance_cnt;
8669 u8 schedule_instance = 0;
8670 struct adv_info *adv, *next_instance;
8671 int err;
8672 struct mgmt_pending_cmd *cmd;
8673
8674 bt_dev_dbg(hdev, "sock %p", sk);
8675
8676 status = mgmt_le_support(hdev);
8677 if (status)
8678 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8679 status);
8680
8681 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8682 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8683 MGMT_STATUS_INVALID_PARAMS);
8684
8685 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8686 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8687 MGMT_STATUS_INVALID_PARAMS);
8688
8689 flags = __le32_to_cpu(cp->flags);
8690 timeout = __le16_to_cpu(cp->timeout);
8691 duration = __le16_to_cpu(cp->duration);
8692
8693 if (!requested_adv_flags_are_valid(hdev, flags))
8694 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8695 MGMT_STATUS_INVALID_PARAMS);
8696
8697 hci_dev_lock(hdev);
8698
8699 if (timeout && !hdev_is_powered(hdev)) {
8700 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8701 MGMT_STATUS_REJECTED);
8702 goto unlock;
8703 }
8704
8705 if (adv_busy(hdev)) {
8706 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8707 MGMT_STATUS_BUSY);
8708 goto unlock;
8709 }
8710
8711 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8712 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8713 cp->scan_rsp_len, false)) {
8714 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8715 MGMT_STATUS_INVALID_PARAMS);
8716 goto unlock;
8717 }
8718
8719 prev_instance_cnt = hdev->adv_instance_cnt;
8720
8721 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8722 cp->adv_data_len, cp->data,
8723 cp->scan_rsp_len,
8724 cp->data + cp->adv_data_len,
8725 timeout, duration,
8726 HCI_ADV_TX_POWER_NO_PREFERENCE,
8727 hdev->le_adv_min_interval,
8728 hdev->le_adv_max_interval, 0);
8729 if (IS_ERR(adv)) {
8730 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8731 MGMT_STATUS_FAILED);
8732 goto unlock;
8733 }
8734
8735 /* Only trigger an advertising added event if a new instance was
8736 * actually added.
8737 */
8738 if (hdev->adv_instance_cnt > prev_instance_cnt)
8739 mgmt_advertising_added(sk, hdev, cp->instance);
8740
8741 if (hdev->cur_adv_instance == cp->instance) {
8742 /* If the currently advertised instance is being changed then
8743 * cancel the current advertising and schedule the next
8744 * instance. If there is only one instance then the overridden
8745 * advertising data will be visible right away.
8746 */
8747 cancel_adv_timeout(hdev);
8748
8749 next_instance = hci_get_next_instance(hdev, cp->instance);
8750 if (next_instance)
8751 schedule_instance = next_instance->instance;
8752 } else if (!hdev->adv_instance_timeout) {
8753 /* Immediately advertise the new instance if no other
8754 * instance is currently being advertised.
8755 */
8756 schedule_instance = cp->instance;
8757 }
8758
8759 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8760 * there is no instance to be advertised then we have no HCI
8761 * communication to make. Simply return.
8762 */
8763 if (!hdev_is_powered(hdev) ||
8764 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8765 !schedule_instance) {
8766 rp.instance = cp->instance;
8767 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8768 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8769 goto unlock;
8770 }
8771
8772 /* We're good to go, update advertising data, parameters, and start
8773 * advertising.
8774 */
8775 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8776 data_len);
8777 if (!cmd) {
8778 err = -ENOMEM;
8779 goto unlock;
8780 }
8781
8782 cp->instance = schedule_instance;
8783
8784 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8785 add_advertising_complete);
8786 if (err < 0)
8787 mgmt_pending_free(cmd);
8788
8789 unlock:
8790 hci_dev_unlock(hdev);
8791
8792 return err;
8793 }
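
/* Illustrative sketch only (not part of this file): a userspace client
 * could issue Add Advertising over the mgmt socket roughly as below,
 * assuming a hypothetical mgmt_send() helper that prepends struct
 * mgmt_hdr and writes to an HCI control socket. All values are
 * examples.
 *
 *	struct {
 *		struct mgmt_cp_add_advertising cp;
 *		uint8_t adv_data[3];
 *	} __attribute__((packed)) req = {
 *		.cp.instance	 = 1,
 *		.cp.flags	 = htole32(MGMT_ADV_FLAG_CONNECTABLE),
 *		.cp.adv_data_len = 3,
 *		.adv_data	 = { 0x02, 0x0a, 0x00 },  // TX Power: 0 dBm
 *	};
 *
 *	mgmt_send(fd, MGMT_OP_ADD_ADVERTISING, hdev_id, &req, sizeof(req));
 */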
8794
8795 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8796 int err)
8797 {
8798 struct mgmt_pending_cmd *cmd = data;
8799 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8800 struct mgmt_rp_add_ext_adv_params rp;
8801 struct adv_info *adv;
8802 u32 flags;
8803
8804 BT_DBG("%s", hdev->name);
8805
8806 hci_dev_lock(hdev);
8807
8808 adv = hci_find_adv_instance(hdev, cp->instance);
8809 if (!adv)
8810 goto unlock;
8811
8812 rp.instance = cp->instance;
8813 rp.tx_power = adv->tx_power;
8814
8815 /* While we're at it, inform userspace of the available space for this
8816 * advertisement, given the flags that will be used.
8817 */
8818 flags = __le32_to_cpu(cp->flags);
8819 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8820 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8821
8822 if (err) {
8823 /* If this advertisement was previously being advertised and we
8824 * failed to update it, signal that it has been removed and
8825 * delete its structure.
8826 */
8827 if (!adv->pending)
8828 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8829
8830 hci_remove_adv_instance(hdev, cp->instance);
8831
8832 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8833 mgmt_status(err));
8834 } else {
8835 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8836 mgmt_status(err), &rp, sizeof(rp));
8837 }
8838
8839 unlock:
8840 if (cmd)
8841 mgmt_pending_free(cmd);
8842
8843 hci_dev_unlock(hdev);
8844 }
8845
8846 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8847 {
8848 struct mgmt_pending_cmd *cmd = data;
8849 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8850
8851 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8852 }
8853
8854 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8855 void *data, u16 data_len)
8856 {
8857 struct mgmt_cp_add_ext_adv_params *cp = data;
8858 struct mgmt_rp_add_ext_adv_params rp;
8859 struct mgmt_pending_cmd *cmd = NULL;
8860 struct adv_info *adv;
8861 u32 flags, min_interval, max_interval;
8862 u16 timeout, duration;
8863 u8 status;
8864 s8 tx_power;
8865 int err;
8866
8867 BT_DBG("%s", hdev->name);
8868
8869 status = mgmt_le_support(hdev);
8870 if (status)
8871 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8872 status);
8873
8874 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8875 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8876 MGMT_STATUS_INVALID_PARAMS);
8877
8878 /* The purpose of breaking add_advertising into two separate MGMT calls
8879 * for params and data is to allow more parameters to be added to this
8880 * structure in the future. For this reason, we verify that we have the
8881 * bare minimum structure that was known when the interface was defined. Any
8882 * extra parameters we don't know about will be ignored in this request.
8883 */
8884 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8885 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8886 MGMT_STATUS_INVALID_PARAMS);
8887
8888 flags = __le32_to_cpu(cp->flags);
8889
8890 if (!requested_adv_flags_are_valid(hdev, flags))
8891 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8892 MGMT_STATUS_INVALID_PARAMS);
8893
8894 hci_dev_lock(hdev);
8895
8896 /* In the new interface, we require the controller to be powered to register */
8897 if (!hdev_is_powered(hdev)) {
8898 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8899 MGMT_STATUS_REJECTED);
8900 goto unlock;
8901 }
8902
8903 if (adv_busy(hdev)) {
8904 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8905 MGMT_STATUS_BUSY);
8906 goto unlock;
8907 }
8908
8909 /* Parse defined parameters from request, use defaults otherwise */
8910 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8911 __le16_to_cpu(cp->timeout) : 0;
8912
8913 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8914 __le16_to_cpu(cp->duration) :
8915 hdev->def_multi_adv_rotation_duration;
8916
8917 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8918 __le32_to_cpu(cp->min_interval) :
8919 hdev->le_adv_min_interval;
8920
8921 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8922 __le32_to_cpu(cp->max_interval) :
8923 hdev->le_adv_max_interval;
8924
8925 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8926 cp->tx_power :
8927 HCI_ADV_TX_POWER_NO_PREFERENCE;
8928
8929 /* Create advertising instance with no advertising or response data */
8930 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8931 timeout, duration, tx_power, min_interval,
8932 max_interval, 0);
8933
8934 if (IS_ERR(adv)) {
8935 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8936 MGMT_STATUS_FAILED);
8937 goto unlock;
8938 }
8939
8940 /* Submit request for advertising params if ext adv available */
8941 if (ext_adv_capable(hdev)) {
8942 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8943 data, data_len);
8944 if (!cmd) {
8945 err = -ENOMEM;
8946 hci_remove_adv_instance(hdev, cp->instance);
8947 goto unlock;
8948 }
8949
8950 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8951 add_ext_adv_params_complete);
8952 if (err < 0)
8953 mgmt_pending_free(cmd);
8954 } else {
8955 rp.instance = cp->instance;
8956 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8957 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8958 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8959 err = mgmt_cmd_complete(sk, hdev->id,
8960 MGMT_OP_ADD_EXT_ADV_PARAMS,
8961 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8962 }
8963
8964 unlock:
8965 hci_dev_unlock(hdev);
8966
8967 return err;
8968 }
8969
8970 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8971 {
8972 struct mgmt_pending_cmd *cmd = data;
8973 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8974 struct mgmt_rp_add_advertising rp;
8975
8976 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8977
8978 memset(&rp, 0, sizeof(rp));
8979
8980 rp.instance = cp->instance;
8981
8982 if (err)
8983 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8984 mgmt_status(err));
8985 else
8986 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8987 mgmt_status(err), &rp, sizeof(rp));
8988
8989 mgmt_pending_free(cmd);
8990 }
8991
8992 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8993 {
8994 struct mgmt_pending_cmd *cmd = data;
8995 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8996 int err;
8997
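/* With extended advertising the advertising data and scan response
 * must be programmed into the controller before the set is enabled;
 * without it, fall back to the software rotation scheduler.
 */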
8998 if (ext_adv_capable(hdev)) {
8999 err = hci_update_adv_data_sync(hdev, cp->instance);
9000 if (err)
9001 return err;
9002
9003 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
9004 if (err)
9005 return err;
9006
9007 return hci_enable_ext_advertising_sync(hdev, cp->instance);
9008 }
9009
9010 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9011 }
9012
9013 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9014 u16 data_len)
9015 {
9016 struct mgmt_cp_add_ext_adv_data *cp = data;
9017 struct mgmt_rp_add_ext_adv_data rp;
9018 u8 schedule_instance = 0;
9019 struct adv_info *next_instance;
9020 struct adv_info *adv_instance;
9021 int err = 0;
9022 struct mgmt_pending_cmd *cmd;
9023
9024 BT_DBG("%s", hdev->name);
9025
9026 hci_dev_lock(hdev);
9027
9028 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9029
9030 if (!adv_instance) {
9031 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9032 MGMT_STATUS_INVALID_PARAMS);
9033 goto unlock;
9034 }
9035
9036 /* In the new interface, we require the controller to be powered to register */
9037 if (!hdev_is_powered(hdev)) {
9038 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9039 MGMT_STATUS_REJECTED);
9040 goto clear_new_instance;
9041 }
9042
9043 if (adv_busy(hdev)) {
9044 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9045 MGMT_STATUS_BUSY);
9046 goto clear_new_instance;
9047 }
9048
9049 /* Validate new data */
9050 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9051 cp->adv_data_len, true) ||
9052 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9053 cp->adv_data_len, cp->scan_rsp_len, false)) {
9054 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9055 MGMT_STATUS_INVALID_PARAMS);
9056 goto clear_new_instance;
9057 }
9058
9059 /* Set the data in the advertising instance */
9060 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9061 cp->data, cp->scan_rsp_len,
9062 cp->data + cp->adv_data_len);
9063
9064 /* If using software rotation, determine next instance to use */
9065 if (hdev->cur_adv_instance == cp->instance) {
9066 /* If the currently advertised instance is being changed
9067 * then cancel the current advertising and schedule the
9068 * next instance. If there is only one instance then the
9069 * overridden advertising data will be visible right
9070 * away.
9071 */
9072 cancel_adv_timeout(hdev);
9073
9074 next_instance = hci_get_next_instance(hdev, cp->instance);
9075 if (next_instance)
9076 schedule_instance = next_instance->instance;
9077 } else if (!hdev->adv_instance_timeout) {
9078 /* Immediately advertise the new instance if no other
9079 * instance is currently being advertised.
9080 */
9081 schedule_instance = cp->instance;
9082 }
9083
9084 /* If the HCI_ADVERTISING flag is set or there is no instance to
9085 * be advertised then we have no HCI communication to make.
9086 * Simply return.
9087 */
9088 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9089 if (adv_instance->pending) {
9090 mgmt_advertising_added(sk, hdev, cp->instance);
9091 adv_instance->pending = false;
9092 }
9093 rp.instance = cp->instance;
9094 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9095 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9096 goto unlock;
9097 }
9098
9099 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9100 data_len);
9101 if (!cmd) {
9102 err = -ENOMEM;
9103 goto clear_new_instance;
9104 }
9105
9106 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9107 add_ext_adv_data_complete);
9108 if (err < 0) {
9109 mgmt_pending_free(cmd);
9110 goto clear_new_instance;
9111 }
9112
9113 /* We were successful in updating data, so trigger advertising_added
9114 * event if this is an instance that wasn't previously advertising. If
9115 * a failure occurs in the requests we initiated, we will remove the
9116 * instance again in add_ext_adv_data_complete.
9117 */
9118 if (adv_instance->pending)
9119 mgmt_advertising_added(sk, hdev, cp->instance);
9120
9121 goto unlock;
9122
9123 clear_new_instance:
9124 hci_remove_adv_instance(hdev, cp->instance);
9125
9126 unlock:
9127 hci_dev_unlock(hdev);
9128
9129 return err;
9130 }
9131
9132 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9133 int err)
9134 {
9135 struct mgmt_pending_cmd *cmd = data;
9136 struct mgmt_cp_remove_advertising *cp = cmd->param;
9137 struct mgmt_rp_remove_advertising rp;
9138
9139 bt_dev_dbg(hdev, "err %d", err);
9140
9141 memset(&rp, 0, sizeof(rp));
9142 rp.instance = cp->instance;
9143
9144 if (err)
9145 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9146 mgmt_status(err));
9147 else
9148 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9149 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9150
9151 mgmt_pending_free(cmd);
9152 }
9153
9154 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9155 {
9156 struct mgmt_pending_cmd *cmd = data;
9157 struct mgmt_cp_remove_advertising *cp = cmd->param;
9158 int err;
9159
9160 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9161 if (err)
9162 return err;
9163
9164 if (list_empty(&hdev->adv_instances))
9165 err = hci_disable_advertising_sync(hdev);
9166
9167 return err;
9168 }
9169
9170 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9171 void *data, u16 data_len)
9172 {
9173 struct mgmt_cp_remove_advertising *cp = data;
9174 struct mgmt_pending_cmd *cmd;
9175 int err;
9176
9177 bt_dev_dbg(hdev, "sock %p", sk);
9178
9179 hci_dev_lock(hdev);
9180
9181 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9182 err = mgmt_cmd_status(sk, hdev->id,
9183 MGMT_OP_REMOVE_ADVERTISING,
9184 MGMT_STATUS_INVALID_PARAMS);
9185 goto unlock;
9186 }
9187
9188 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9189 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9190 MGMT_STATUS_BUSY);
9191 goto unlock;
9192 }
9193
9194 if (list_empty(&hdev->adv_instances)) {
9195 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9196 MGMT_STATUS_INVALID_PARAMS);
9197 goto unlock;
9198 }
9199
9200 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9201 data_len);
9202 if (!cmd) {
9203 err = -ENOMEM;
9204 goto unlock;
9205 }
9206
9207 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9208 remove_advertising_complete);
9209 if (err < 0)
9210 mgmt_pending_free(cmd);
9211
9212 unlock:
9213 hci_dev_unlock(hdev);
9214
9215 return err;
9216 }
9217
9218 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9219 void *data, u16 data_len)
9220 {
9221 struct mgmt_cp_get_adv_size_info *cp = data;
9222 struct mgmt_rp_get_adv_size_info rp;
9223 u32 flags, supported_flags;
9224
9225 bt_dev_dbg(hdev, "sock %p", sk);
9226
9227 if (!lmp_le_capable(hdev))
9228 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9229 MGMT_STATUS_REJECTED);
9230
9231 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9232 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9233 MGMT_STATUS_INVALID_PARAMS);
9234
9235 flags = __le32_to_cpu(cp->flags);
9236
9237 /* The current implementation only supports a subset of the specified
9238 * flags.
9239 */
9240 supported_flags = get_supported_adv_flags(hdev);
9241 if (flags & ~supported_flags)
9242 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9243 MGMT_STATUS_INVALID_PARAMS);
9244
9245 rp.instance = cp->instance;
9246 rp.flags = cp->flags;
9247 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9248 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9249
9250 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9251 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9252 }
9253
9254 static const struct hci_mgmt_handler mgmt_handlers[] = {
9255 { NULL }, /* 0x0000 (no command) */
9256 { read_version, MGMT_READ_VERSION_SIZE,
9257 HCI_MGMT_NO_HDEV |
9258 HCI_MGMT_UNTRUSTED },
9259 { read_commands, MGMT_READ_COMMANDS_SIZE,
9260 HCI_MGMT_NO_HDEV |
9261 HCI_MGMT_UNTRUSTED },
9262 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9263 HCI_MGMT_NO_HDEV |
9264 HCI_MGMT_UNTRUSTED },
9265 { read_controller_info, MGMT_READ_INFO_SIZE,
9266 HCI_MGMT_UNTRUSTED },
9267 { set_powered, MGMT_SETTING_SIZE },
9268 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9269 { set_connectable, MGMT_SETTING_SIZE },
9270 { set_fast_connectable, MGMT_SETTING_SIZE },
9271 { set_bondable, MGMT_SETTING_SIZE },
9272 { set_link_security, MGMT_SETTING_SIZE },
9273 { set_ssp, MGMT_SETTING_SIZE },
9274 { set_hs, MGMT_SETTING_SIZE },
9275 { set_le, MGMT_SETTING_SIZE },
9276 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9277 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9278 { add_uuid, MGMT_ADD_UUID_SIZE },
9279 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9280 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9281 HCI_MGMT_VAR_LEN },
9282 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9283 HCI_MGMT_VAR_LEN },
9284 { disconnect, MGMT_DISCONNECT_SIZE },
9285 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9286 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9287 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name, MGMT_CONFIRM_NAME_SIZE },
	{ block_device, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising, MGMT_SETTING_SIZE },
	{ set_bredr, MGMT_SETTING_SIZE },
	{ set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn, MGMT_SETTING_SIZE },
	{ set_debug_keys, MGMT_SETTING_SIZE },
	{ set_privacy, MGMT_SET_PRIVACY_SIZE },
	{ load_irks, MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device, MGMT_ADD_DEVICE_SIZE },
	{ remove_device, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising, MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance, MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech, MGMT_SETTING_SIZE },
	{ read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
					MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send, MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
};

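/* Notify user space that a new controller index has appeared.
 * Configured controllers get INDEX_ADDED, unconfigured ones get
 * UNCONF_INDEX_ADDED, and both additionally emit EXT_INDEX_ADDED
 * carrying the controller type and bus.
 */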
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}

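/* Called with the result of a power-on attempt: on success re-arm the
 * stored LE actions and the passive scan, then complete any pending
 * SET_POWERED commands and announce the new settings.
 */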
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

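/* Complete all pending commands when the controller goes down and,
 * if a class of device had been set, signal that it is now zero.
 */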
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	/* If the power off is because of hdev unregistration let us
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

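/* Forward a newly created BR/EDR link key to user space. The store
 * hint tells user space whether the key should be persisted.
 */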
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

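/* Map an SMP long term key type and its authentication state to the
 * corresponding mgmt LTK type constant.
 */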
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * their long term keys to be stored. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * their signature resolving keys to be stored. Their addresses
	 * will change the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

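/* Report updated connection parameters for an identity address so
 * that user space can decide whether to store them.
 */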
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

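/* Emit DEVICE_CONNECTED exactly once per connection. For LE the
 * advertising data is attached as EIR data; for BR/EDR the remote
 * name and class of device are appended instead.
 */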
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate buffer for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
}

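/* Return true if a SET_POWERED command turning the controller off is
 * currently pending, i.e. the device is about to power down.
 */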
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
			     unpair_device_rsp, hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup,
			     &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

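/* Walk the EIR/advertising data fields and check whether any of the
 * advertised 16-, 32- or 128-bit service UUIDs, expanded to 128 bits
 * using the Bluetooth base UUID, appears in the given UUID list.
 */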
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * an RSSI smaller than the RSSI threshold will be dropped. If the
	 * quirk is set, let it through for further processing, as we might
	 * need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure an updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

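/* Forward an advertising report to mesh listeners, but only if it
 * carries at least one of the AD types the receiver registered for,
 * or if no AD type filter is configured at all.
 */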
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

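/* Central entry point for discovery results: apply mesh forwarding,
 * passive scan and advertisement monitor policies as well as any
 * service discovery filters before emitting DEVICE_FOUND.
 */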
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE, one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

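/* Called when a management socket is closed: complete any mesh
 * transmissions still owned by that socket on all controllers.
 */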
void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}
