// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *  Bluetooth support for Intel devices
 *
 *  Copyright (C) 2015  Intel Corporation
 */

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btintel.h"

#define VERSION "0.1"

#define BDADDR_INTEL (&(bdaddr_t){{0x00, 0x8b, 0x9e, 0x19, 0x03, 0x00}})

int btintel_check_bdaddr(struct hci_dev *hdev)
{
	struct hci_rp_read_bd_addr *bda;
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		int err = PTR_ERR(skb);
		bt_dev_err(hdev, "Reading Intel device address failed (%d)",
			   err);
		return err;
	}

	if (skb->len != sizeof(*bda)) {
		bt_dev_err(hdev, "Intel device address length mismatch");
		kfree_skb(skb);
		return -EIO;
	}

	bda = (struct hci_rp_read_bd_addr *)skb->data;

	/* For some Intel based controllers, the default Bluetooth device
	 * address 00:03:19:9E:8B:00 can be found. These controllers are
	 * fully operational, but have the danger of duplicate addresses
	 * and that in turn can cause problems with Bluetooth operation.
	 */
	if (!bacmp(&bda->bdaddr, BDADDR_INTEL)) {
		bt_dev_err(hdev, "Found Intel default device address (%pMR)",
			   &bda->bdaddr);
		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_check_bdaddr);

int btintel_enter_mfg(struct hci_dev *hdev)
{
	static const u8 param[] = { 0x01, 0x00 };
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Entering manufacturer mode failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}
	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_enter_mfg);
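
/* Callers bracket vendor commands that are only accepted in manufacturing
 * mode between btintel_enter_mfg() and btintel_exit_mfg(), as
 * btintel_set_diag_mfg() and btintel_set_event_mask_mfg() below do.
 * Illustrative sketch only (error handling trimmed):
 *
 *	err = btintel_enter_mfg(hdev);
 *	ret = btintel_set_event_mask(hdev, debug);
 *	err = btintel_exit_mfg(hdev, false, false);
 *
 * Passing reset=false leaves the controller running; reset=true additionally
 * resets it, with "patched" selecting whether the downloaded patches remain
 * active after the reset.
 */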
int btintel_exit_mfg(struct hci_dev *hdev, bool reset, bool patched)
{
	u8 param[] = { 0x00, 0x00 };
	struct sk_buff *skb;

	/* The 2nd command parameter specifies the manufacturing exit method:
	 * 0x00: Just disable the manufacturing mode.
	 * 0x01: Disable manufacturing mode and reset with patches deactivated.
	 * 0x02: Disable manufacturing mode and reset with patches activated.
	 */
	if (reset)
		param[1] |= patched ? 0x02 : 0x01;

	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Exiting manufacturer mode failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}
	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_exit_mfg);

int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
	struct sk_buff *skb;
	int err;

	skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "Changing Intel device address failed (%d)",
			   err);
		return err;
	}
	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_bdaddr);

int btintel_set_diag(struct hci_dev *hdev, bool enable)
{
	struct sk_buff *skb;
	u8 param[3];
	int err;

	if (enable) {
		param[0] = 0x03;
		param[1] = 0x03;
		param[2] = 0x03;
	} else {
		param[0] = 0x00;
		param[1] = 0x00;
		param[2] = 0x00;
	}

	skb = __hci_cmd_sync(hdev, 0xfc43, 3, param, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		if (err == -ENODATA)
			goto done;
		bt_dev_err(hdev, "Changing Intel diagnostic mode failed (%d)",
			   err);
		return err;
	}
	kfree_skb(skb);

done:
	btintel_set_event_mask(hdev, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_diag);

int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable)
{
	int err, ret;

	err = btintel_enter_mfg(hdev);
	if (err)
		return err;

	ret = btintel_set_diag(hdev, enable);

	err = btintel_exit_mfg(hdev, false, false);
	if (err)
		return err;

	return ret;
}
EXPORT_SYMBOL_GPL(btintel_set_diag_mfg);

void btintel_hw_error(struct hci_dev *hdev, u8 code)
{
	struct sk_buff *skb;
	u8 type = 0x00;

	bt_dev_err(hdev, "Hardware error 0x%2.2x", code);

	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reset after hardware error failed (%ld)",
			   PTR_ERR(skb));
		return;
	}
	kfree_skb(skb);

	skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Retrieving Intel exception info failed (%ld)",
			   PTR_ERR(skb));
		return;
	}

	if (skb->len != 13) {
		bt_dev_err(hdev, "Exception info size mismatch");
		kfree_skb(skb);
		return;
	}

	bt_dev_err(hdev, "Exception info %s", (char *)(skb->data + 1));

	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(btintel_hw_error);

void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
{
	const char *variant;

	switch (ver->fw_variant) {
	case 0x06:
		variant = "Bootloader";
		break;
	case 0x23:
		variant = "Firmware";
		break;
	default:
		return;
	}

	bt_dev_info(hdev, "%s revision %u.%u build %u week %u %u",
		    variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
		    ver->fw_build_num, ver->fw_build_ww,
		    2000 + ver->fw_build_yy);
}
EXPORT_SYMBOL_GPL(btintel_version_info);
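
/* btintel_secure_send() splits an arbitrarily long payload into HCI vendor
 * commands (opcode 0xfc09) carrying at most 253 parameter bytes each: one
 * leading fragment-type byte followed by up to 252 bytes of data. A 600 byte
 * payload, for example, goes out as fragments of 252, 252 and 96 data bytes.
 */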
int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
			const void *param)
{
	while (plen > 0) {
		struct sk_buff *skb;
		u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;

		cmd_param[0] = fragment_type;
		memcpy(cmd_param + 1, param, fragment_len);

		skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
				     cmd_param, HCI_INIT_TIMEOUT);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		kfree_skb(skb);

		plen -= fragment_len;
		param += fragment_len;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_secure_send);
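
/* Hypothetical example of a single DDC entry as consumed below: a parameter
 * with a 1 byte value is encoded as { 0x03, <2 byte DDC ID>, <value> }, i.e.
 * Length = 0x03 covers the 2 byte ID plus one value byte. The entry including
 * its length byte (cmd_plen = Length + 1 = 4) is passed verbatim as the
 * parameter of the Intel_Write_DDC command (0xfc8b).
 */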
int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name)
{
	const struct firmware *fw;
	struct sk_buff *skb;
	const u8 *fw_ptr;
	int err;

	err = request_firmware_direct(&fw, ddc_name, &hdev->dev);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to load Intel DDC file %s (%d)",
			   ddc_name, err);
		return err;
	}

	bt_dev_info(hdev, "Found Intel DDC parameters: %s", ddc_name);

	fw_ptr = fw->data;

	/* The DDC file contains one or more DDC structures, each with
	 * Length (1 byte), DDC ID (2 bytes) and DDC value (Length - 2 bytes).
	 */
	while (fw->size > fw_ptr - fw->data) {
		u8 cmd_plen = fw_ptr[0] + sizeof(u8);

		skb = __hci_cmd_sync(hdev, 0xfc8b, cmd_plen, fw_ptr,
				     HCI_INIT_TIMEOUT);
		if (IS_ERR(skb)) {
			bt_dev_err(hdev, "Failed to send Intel_Write_DDC (%ld)",
				   PTR_ERR(skb));
			release_firmware(fw);
			return PTR_ERR(skb);
		}

		fw_ptr += cmd_plen;
		kfree_skb(skb);
	}

	release_firmware(fw);

	bt_dev_info(hdev, "Applying Intel DDC parameters completed");

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_load_ddc_config);

int btintel_set_event_mask(struct hci_dev *hdev, bool debug)
{
	u8 mask[8] = { 0x87, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	struct sk_buff *skb;
	int err;

	if (debug)
		mask[1] |= 0x62;

	skb = __hci_cmd_sync(hdev, 0xfc52, 8, mask, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "Setting Intel event mask failed (%d)", err);
		return err;
	}
	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_event_mask);

int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug)
{
	int err, ret;

	err = btintel_enter_mfg(hdev);
	if (err)
		return err;

	ret = btintel_set_event_mask(hdev, debug);

	err = btintel_exit_mfg(hdev, false, false);
	if (err)
		return err;

	return ret;
}
EXPORT_SYMBOL_GPL(btintel_set_event_mask_mfg);

int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	if (skb->len != sizeof(*ver)) {
		bt_dev_err(hdev, "Intel version event size mismatch");
		kfree_skb(skb);
		return -EILSEQ;
	}

	memcpy(ver, skb->data, sizeof(*ver));

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_read_version);

/* ------- REGMAP IBT SUPPORT ------- */

#define IBT_REG_MODE_8BIT  0x00
#define IBT_REG_MODE_16BIT 0x01
#define IBT_REG_MODE_32BIT 0x02

struct regmap_ibt_context {
	struct hci_dev *hdev;
	__u16 op_write;
	__u16 op_read;
};

struct ibt_cp_reg_access {
	__le32 addr;
	__u8   mode;
	__u8   len;
	__u8   data[0];
} __packed;

struct ibt_rp_reg_access {
	__u8   status;
	__le32 addr;
	__u8   data[0];
} __packed;
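
/* The two accessors below exchange the ibt_cp_reg_access / ibt_rp_reg_access
 * structures defined above with the controller: the command carries a 32-bit
 * little-endian register address, an access mode (8/16/32 bit) and a length,
 * optionally followed by the value to write. The read response echoes the
 * address back, preceded by a status byte and followed by the value read.
 */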
static int regmap_ibt_read(void *context, const void *addr, size_t reg_size,
			   void *val, size_t val_size)
{
	struct regmap_ibt_context *ctx = context;
	struct ibt_cp_reg_access cp;
	struct ibt_rp_reg_access *rp;
	struct sk_buff *skb;
	int err = 0;

	if (reg_size != sizeof(__le32))
		return -EINVAL;

	switch (val_size) {
	case 1:
		cp.mode = IBT_REG_MODE_8BIT;
		break;
	case 2:
		cp.mode = IBT_REG_MODE_16BIT;
		break;
	case 4:
		cp.mode = IBT_REG_MODE_32BIT;
		break;
	default:
		return -EINVAL;
	}

	/* regmap provides a little-endian formatted addr */
	cp.addr = *(__le32 *)addr;
	cp.len = val_size;

	bt_dev_dbg(ctx->hdev, "Register (0x%x) read", le32_to_cpu(cp.addr));

	skb = hci_cmd_sync(ctx->hdev, ctx->op_read, sizeof(cp), &cp,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error (%d)",
			   le32_to_cpu(cp.addr), err);
		return err;
	}

	if (skb->len != sizeof(*rp) + val_size) {
		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad len",
			   le32_to_cpu(cp.addr));
		err = -EINVAL;
		goto done;
	}

	rp = (struct ibt_rp_reg_access *)skb->data;

	if (rp->addr != cp.addr) {
		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad addr",
			   le32_to_cpu(rp->addr));
		err = -EINVAL;
		goto done;
	}

	memcpy(val, rp->data, val_size);

done:
	kfree_skb(skb);
	return err;
}

static int regmap_ibt_gather_write(void *context,
				   const void *addr, size_t reg_size,
				   const void *val, size_t val_size)
{
	struct regmap_ibt_context *ctx = context;
	struct ibt_cp_reg_access *cp;
	struct sk_buff *skb;
	int plen = sizeof(*cp) + val_size;
	u8 mode;
	int err = 0;

	if (reg_size != sizeof(__le32))
		return -EINVAL;

	switch (val_size) {
	case 1:
		mode = IBT_REG_MODE_8BIT;
		break;
	case 2:
		mode = IBT_REG_MODE_16BIT;
		break;
	case 4:
		mode = IBT_REG_MODE_32BIT;
		break;
	default:
		return -EINVAL;
	}

	cp = kmalloc(plen, GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	/* regmap provides a little-endian formatted addr/value */
	cp->addr = *(__le32 *)addr;
	cp->mode = mode;
	cp->len = val_size;
	memcpy(&cp->data, val, val_size);

	bt_dev_dbg(ctx->hdev, "Register (0x%x) write", le32_to_cpu(cp->addr));

	skb = hci_cmd_sync(ctx->hdev, ctx->op_write, plen, cp, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) write error (%d)",
			   le32_to_cpu(cp->addr), err);
		goto done;
	}
	kfree_skb(skb);

done:
	kfree(cp);
	return err;
}

static int regmap_ibt_write(void *context, const void *data, size_t count)
{
	/* data contains register+value, since we only support 32bit addr,
	 * minimum data size is 4 bytes.
	 */
	if (WARN_ONCE(count < 4, "Invalid register access"))
		return -EINVAL;

	return regmap_ibt_gather_write(context, data, 4, data + 4, count - 4);
}

static void regmap_ibt_free_context(void *context)
{
	kfree(context);
}

static struct regmap_bus regmap_ibt = {
	.read = regmap_ibt_read,
	.write = regmap_ibt_write,
	.gather_write = regmap_ibt_gather_write,
	.free_context = regmap_ibt_free_context,
	.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};

/* Config is the same for all register regions */
static const struct regmap_config regmap_ibt_cfg = {
	.name     = "btintel_regmap",
	.reg_bits = 32,
	.val_bits = 32,
};
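
/* Illustrative sketch of how a transport driver might use this helper; the
 * opcodes and register address below are placeholders, not values taken from
 * this file:
 *
 *	struct regmap *regmap;
 *	unsigned int val;
 *	int err;
 *
 *	regmap = btintel_regmap_init(hdev, <read opcode>, <write opcode>);
 *	if (IS_ERR(regmap))
 *		return PTR_ERR(regmap);
 *
 *	err = regmap_read(regmap, <register address>, &val);
 *
 * Accesses then go through the regmap_ibt bus defined above, i.e. they are
 * issued as the configured vendor HCI commands.
 */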
struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
				   u16 opcode_write)
{
	struct regmap_ibt_context *ctx;

	bt_dev_info(hdev, "regmap: Init R%x-W%x region", opcode_read,
		    opcode_write);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->op_read = opcode_read;
	ctx->op_write = opcode_write;
	ctx->hdev = hdev;

	return regmap_init(&hdev->dev, &regmap_ibt, ctx, &regmap_ibt_cfg);
}
EXPORT_SYMBOL_GPL(btintel_regmap_init);

int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param)
{
	struct intel_reset params = { 0x00, 0x01, 0x00, 0x01, 0x00000000 };
	struct sk_buff *skb;

	params.boot_param = cpu_to_le32(boot_param);

	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), &params,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Failed to send Intel Reset command");
		return PTR_ERR(skb);
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_send_intel_reset);

int btintel_read_boot_params(struct hci_dev *hdev,
			     struct intel_boot_params *params)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	if (skb->len != sizeof(*params)) {
		bt_dev_err(hdev, "Intel boot parameters size mismatch");
		kfree_skb(skb);
		return -EILSEQ;
	}

	memcpy(params, skb->data, sizeof(*params));

	kfree_skb(skb);

	if (params->status) {
		bt_dev_err(hdev, "Intel boot parameters command failed (%02x)",
			   params->status);
		return -bt_to_errno(params->status);
	}

	bt_dev_info(hdev, "Device revision is %u",
		    le16_to_cpu(params->dev_revid));

	bt_dev_info(hdev, "Secure boot is %s",
		    params->secure_boot ? "enabled" : "disabled");

	bt_dev_info(hdev, "OTP lock is %s",
		    params->otp_lock ? "enabled" : "disabled");

	bt_dev_info(hdev, "API lock is %s",
		    params->api_lock ? "enabled" : "disabled");

	bt_dev_info(hdev, "Debug lock is %s",
		    params->debug_lock ? "enabled" : "disabled");

	bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
		    params->min_fw_build_nn, params->min_fw_build_cw,
		    2000 + params->min_fw_build_yy);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_read_boot_params);
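
/* Layout of the firmware image as consumed below: a 128 byte CSS header at
 * offset 0 (sent as the Init fragment, type 0x00), 256 bytes of public key
 * at offset 128 (PKey fragment, type 0x03), a 256 byte signature at offset
 * 388 (Sign fragment, type 0x02) and, from offset 644 onwards, a stream of
 * HCI commands that is forwarded in 4 byte aligned chunks as Data fragments
 * (type 0x01).
 */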
int btintel_download_firmware(struct hci_dev *hdev, const struct firmware *fw,
			      u32 *boot_param)
{
	int err;
	const u8 *fw_ptr;
	u32 frag_len;

	/* Start the firmware download transaction with the Init fragment
	 * represented by the 128 bytes of CSS header.
	 */
	err = btintel_secure_send(hdev, 0x00, 128, fw->data);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
		goto done;
	}

	/* Send the 256 bytes of public key information from the firmware
	 * as the PKey fragment.
	 */
	err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err);
		goto done;
	}

	/* Send the 256 bytes of signature information from the firmware
	 * as the Sign fragment.
	 */
	err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send firmware signature (%d)", err);
		goto done;
	}

	fw_ptr = fw->data + 644;
	frag_len = 0;

	while (fw_ptr - fw->data < fw->size) {
		struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);

		/* Each SKU has a different reset parameter to use in the
		 * HCI_Intel_Reset command and it is embedded in the firmware
		 * data. So, instead of using a static value per SKU, check
		 * the firmware data and save it for later use.
		 */
		if (le16_to_cpu(cmd->opcode) == 0xfc0e) {
			/* The boot parameter is the first 32-bit value;
			 * the remaining 3 octets are reserved.
			 */
			*boot_param = get_unaligned_le32(fw_ptr + sizeof(*cmd));

			bt_dev_dbg(hdev, "boot_param=0x%x", *boot_param);
		}

		frag_len += sizeof(*cmd) + cmd->plen;

		/* The parameter length of the secure send command requires
		 * a 4 byte alignment. The firmware file happens to contain
		 * proper Intel_NOP commands to align the fragments as needed.
		 *
		 * Send the set of commands with 4 byte alignment from the
		 * firmware data buffer as a single Data fragment.
		 */
		if (!(frag_len % 4)) {
			err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
			if (err < 0) {
				bt_dev_err(hdev,
					   "Failed to send firmware data (%d)",
					   err);
				goto done;
			}

			fw_ptr += frag_len;
			frag_len = 0;
		}
	}

done:
	return err;
}
EXPORT_SYMBOL_GPL(btintel_download_firmware);

void btintel_reset_to_bootloader(struct hci_dev *hdev)
{
	struct intel_reset params;
	struct sk_buff *skb;

	/* Send Intel Reset command. This will result in
	 * re-enumeration of the BT controller.
	 *
	 * Intel Reset parameter description:
	 * reset_type :   0x00 (Soft reset),
	 *                0x01 (Hard reset)
	 * patch_enable : 0x00 (Do not enable),
	 *                0x01 (Enable)
	 * ddc_reload :   0x00 (Do not reload),
	 *                0x01 (Reload)
	 * boot_option:   0x00 (Current image),
	 *                0x01 (Specified boot address)
	 * boot_param:    Boot address
	 */
	params.reset_type = 0x01;
	params.patch_enable = 0x01;
	params.ddc_reload = 0x01;
	params.boot_option = 0x00;
	params.boot_param = cpu_to_le32(0x00000000);

	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params),
			     &params, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "FW download error recovery failed (%ld)",
			   PTR_ERR(skb));
		return;
	}
	bt_dev_info(hdev, "Intel reset sent to retry FW download");
	kfree_skb(skb);

	/* Current Intel BT controllers (ThP/JfP) hold the USB reset lines for
	 * 2ms when they receive an Intel Reset in bootloader mode, whereas
	 * upcoming Intel BT controllers will hold USB reset for 150ms. To
	 * keep the delay generic, 150ms is chosen here.
	 */
	msleep(150);
}
EXPORT_SYMBOL_GPL(btintel_reset_to_bootloader);

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("intel/ibt-11-5.sfi");
MODULE_FIRMWARE("intel/ibt-11-5.ddc");
MODULE_FIRMWARE("intel/ibt-12-16.sfi");
MODULE_FIRMWARE("intel/ibt-12-16.ddc");