// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *  Bluetooth support for Intel devices
 *
 *  Copyright (C) 2015  Intel Corporation
 */

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btintel.h"

#define VERSION "0.1"

#define BDADDR_INTEL		(&(bdaddr_t){{0x00, 0x8b, 0x9e, 0x19, 0x03, 0x00}})
#define RSA_HEADER_LEN		644
#define CSS_HEADER_OFFSET	8
#define ECDSA_OFFSET		644
#define ECDSA_HEADER_LEN	320

#define CMD_WRITE_BOOT_PARAMS	0xfc0e

struct cmd_write_boot_params {
	__le32 boot_addr;
	u8  fw_build_num;
	u8  fw_build_ww;
	u8  fw_build_yy;
} __packed;

int btintel_check_bdaddr(struct hci_dev *hdev)
{
	struct hci_rp_read_bd_addr *bda;
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		int err = PTR_ERR(skb);

		bt_dev_err(hdev, "Reading Intel device address failed (%d)",
			   err);
		return err;
	}

	if (skb->len != sizeof(*bda)) {
		bt_dev_err(hdev, "Intel device address length mismatch");
		kfree_skb(skb);
		return -EIO;
	}

	bda = (struct hci_rp_read_bd_addr *)skb->data;

	/* For some Intel based controllers, the default Bluetooth device
	 * address 00:03:19:9E:8B:00 can be found. These controllers are
	 * fully operational, but have the danger of duplicate addresses
	 * and that in turn can cause problems with Bluetooth operation.
	 */
	if (!bacmp(&bda->bdaddr, BDADDR_INTEL)) {
		bt_dev_err(hdev, "Found Intel default device address (%pMR)",
			   &bda->bdaddr);
		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_check_bdaddr);

int btintel_enter_mfg(struct hci_dev *hdev)
{
	static const u8 param[] = { 0x01, 0x00 };
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Entering manufacturer mode failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}
	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_enter_mfg);

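/* Several vendor commands (diagnostics, event mask updates, patching) are
 * only accepted while the controller is in manufacturer mode. Callers are
 * expected to bracket such commands with btintel_enter_mfg() and
 * btintel_exit_mfg(); see btintel_set_diag_mfg() and
 * btintel_set_event_mask_mfg() below for the typical pattern.
 */
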
int btintel_exit_mfg(struct hci_dev *hdev, bool reset, bool patched)
{
	u8 param[] = { 0x00, 0x00 };
	struct sk_buff *skb;

	/* The 2nd command parameter specifies the manufacturing exit method:
	 * 0x00: Just disable the manufacturing mode.
	 * 0x01: Disable manufacturing mode and reset with patches deactivated.
	 * 0x02: Disable manufacturing mode and reset with patches activated.
	 */
	if (reset)
		param[1] |= patched ? 0x02 : 0x01;

	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Exiting manufacturer mode failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}
	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_exit_mfg);

int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
	struct sk_buff *skb;
	int err;

	skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "Changing Intel device address failed (%d)",
			   err);
		return err;
	}
	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_bdaddr);

int btintel_set_diag(struct hci_dev *hdev, bool enable)
{
	struct sk_buff *skb;
	u8 param[3];
	int err;

	if (enable) {
		param[0] = 0x03;
		param[1] = 0x03;
		param[2] = 0x03;
	} else {
		param[0] = 0x00;
		param[1] = 0x00;
		param[2] = 0x00;
	}

	skb = __hci_cmd_sync(hdev, 0xfc43, 3, param, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		if (err == -ENODATA)
			goto done;
		bt_dev_err(hdev, "Changing Intel diagnostic mode failed (%d)",
			   err);
		return err;
	}
	kfree_skb(skb);

done:
	btintel_set_event_mask(hdev, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_diag);

int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable)
{
	int err, ret;

	err = btintel_enter_mfg(hdev);
	if (err)
		return err;

	ret = btintel_set_diag(hdev, enable);

	err = btintel_exit_mfg(hdev, false, false);
	if (err)
		return err;

	return ret;
}
EXPORT_SYMBOL_GPL(btintel_set_diag_mfg);

void btintel_hw_error(struct hci_dev *hdev, u8 code)
{
	struct sk_buff *skb;
	u8 type = 0x00;

	bt_dev_err(hdev, "Hardware error 0x%2.2x", code);

	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reset after hardware error failed (%ld)",
			   PTR_ERR(skb));
		return;
	}
	kfree_skb(skb);

	skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Retrieving Intel exception info failed (%ld)",
			   PTR_ERR(skb));
		return;
	}

	if (skb->len != 13) {
		bt_dev_err(hdev, "Exception info size mismatch");
		kfree_skb(skb);
		return;
	}

	bt_dev_err(hdev, "Exception info %s", (char *)(skb->data + 1));

	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(btintel_hw_error);

int btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
{
	const char *variant;

	/* The hardware platform number has a fixed value of 0x37 and
	 * for now only accept this single value.
	 */
	if (ver->hw_platform != 0x37) {
		bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)",
			   ver->hw_platform);
		return -EINVAL;
	}

	/* Check for supported iBT hardware variants of this firmware
	 * loading method.
	 *
	 * This check has been put in place to ensure correct forward
	 * compatibility options when newer hardware variants come along.
	 */
	switch (ver->hw_variant) {
	case 0x0b:	/* SfP */
	case 0x0c:	/* WsP */
	case 0x11:	/* JfP */
	case 0x12:	/* ThP */
	case 0x13:	/* HrP */
	case 0x14:	/* CcP */
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
			   ver->hw_variant);
		return -EINVAL;
	}

	switch (ver->fw_variant) {
	case 0x06:
		variant = "Bootloader";
		break;
	case 0x23:
		variant = "Firmware";
		break;
	default:
		bt_dev_err(hdev, "Unsupported firmware variant (%02x)",
			   ver->fw_variant);
		return -EINVAL;
	}

	bt_dev_info(hdev, "%s revision %u.%u build %u week %u %u",
		    variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
		    ver->fw_build_num, ver->fw_build_ww,
		    2000 + ver->fw_build_yy);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_version_info);

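/* The Secure Send command (0xfc09) transfers a firmware image to the
 * bootloader in fragments of at most 252 bytes. The first command
 * parameter selects the fragment type and the remaining bytes carry the
 * payload. The fragment types used by the download helpers below are
 * 0x00 (Init/CSS header), 0x01 (Data), 0x02 (Sign) and 0x03 (PKey).
 */
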
int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
			const void *param)
{
	while (plen > 0) {
		struct sk_buff *skb;
		u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;

		cmd_param[0] = fragment_type;
		memcpy(cmd_param + 1, param, fragment_len);

		skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
				     cmd_param, HCI_INIT_TIMEOUT);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		kfree_skb(skb);

		plen -= fragment_len;
		param += fragment_len;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_secure_send);

int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name)
{
	const struct firmware *fw;
	struct sk_buff *skb;
	const u8 *fw_ptr;
	int err;

	err = request_firmware_direct(&fw, ddc_name, &hdev->dev);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to load Intel DDC file %s (%d)",
			   ddc_name, err);
		return err;
	}

	bt_dev_info(hdev, "Found Intel DDC parameters: %s", ddc_name);

	fw_ptr = fw->data;

	/* The DDC file contains one or more DDC structures, each with a
	 * Length (1 byte), a DDC ID (2 bytes) and a DDC value
	 * (Length - 2 bytes).
	 */
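	/* Illustrative example (hypothetical values): the bytes
	 * 03 7c 01 20 form one entry: a Length of 3, the two DDC ID bytes
	 * 0x7c 0x01 and a single value byte 0x20. The Length byte itself
	 * is not counted in Length, so the complete command parameter is
	 * Length + 1 bytes, which is what cmd_plen computes below.
	 */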
	while (fw->size > fw_ptr - fw->data) {
		u8 cmd_plen = fw_ptr[0] + sizeof(u8);

		skb = __hci_cmd_sync(hdev, 0xfc8b, cmd_plen, fw_ptr,
				     HCI_INIT_TIMEOUT);
		if (IS_ERR(skb)) {
			bt_dev_err(hdev, "Failed to send Intel_Write_DDC (%ld)",
				   PTR_ERR(skb));
			release_firmware(fw);
			return PTR_ERR(skb);
		}

		fw_ptr += cmd_plen;
		kfree_skb(skb);
	}

	release_firmware(fw);

	bt_dev_info(hdev, "Applying Intel DDC parameters completed");

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_load_ddc_config);

int btintel_set_event_mask(struct hci_dev *hdev, bool debug)
{
	u8 mask[8] = { 0x87, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	struct sk_buff *skb;
	int err;

	if (debug)
		mask[1] |= 0x62;

	skb = __hci_cmd_sync(hdev, 0xfc52, 8, mask, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "Setting Intel event mask failed (%d)", err);
		return err;
	}
	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_event_mask);

int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug)
{
	int err, ret;

	err = btintel_enter_mfg(hdev);
	if (err)
		return err;

	ret = btintel_set_event_mask(hdev, debug);

	err = btintel_exit_mfg(hdev, false, false);
	if (err)
		return err;

	return ret;
}
EXPORT_SYMBOL_GPL(btintel_set_event_mask_mfg);

int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	if (skb->len != sizeof(*ver)) {
		bt_dev_err(hdev, "Intel version event size mismatch");
		kfree_skb(skb);
		return -EILSEQ;
	}

	memcpy(ver, skb->data, sizeof(*ver));

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_read_version);

int btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version)
{
	const char *variant;

	/* The hardware platform number has a fixed value of 0x37 and
	 * for now only accept this single value.
	 */
	if (INTEL_HW_PLATFORM(version->cnvi_bt) != 0x37) {
		bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)",
			   INTEL_HW_PLATFORM(version->cnvi_bt));
		return -EINVAL;
	}

	/* Check for supported iBT hardware variants of this firmware
	 * loading method.
	 *
	 * This check has been put in place to ensure correct forward
	 * compatibility options when newer hardware variants come along.
	 */
	switch (INTEL_HW_VARIANT(version->cnvi_bt)) {
	case 0x17:	/* TyP */
	case 0x18:	/* Slr */
	case 0x19:	/* Slr-F */
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)",
			   INTEL_HW_VARIANT(version->cnvi_bt));
		return -EINVAL;
	}

	switch (version->img_type) {
	case 0x01:
		variant = "Bootloader";
		/* It is required that every single firmware fragment is acknowledged
		 * with a command complete event. If the boot parameters indicate
		 * that this bootloader does not send them, then abort the setup.
		 */
		if (version->limited_cce != 0x00) {
			bt_dev_err(hdev, "Unsupported Intel firmware loading method (0x%x)",
				   version->limited_cce);
			return -EINVAL;
		}

		/* Secure boot engine type should be either 1 (ECDSA) or 0 (RSA) */
		if (version->sbe_type > 0x01) {
			bt_dev_err(hdev, "Unsupported Intel secure boot engine type (0x%x)",
				   version->sbe_type);
			return -EINVAL;
		}

		bt_dev_info(hdev, "Device revision is %u", version->dev_rev_id);
		bt_dev_info(hdev, "Secure boot is %s",
			    version->secure_boot ? "enabled" : "disabled");
		bt_dev_info(hdev, "OTP lock is %s",
			    version->otp_lock ? "enabled" : "disabled");
		bt_dev_info(hdev, "API lock is %s",
			    version->api_lock ? "enabled" : "disabled");
		bt_dev_info(hdev, "Debug lock is %s",
			    version->debug_lock ? "enabled" : "disabled");
		bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
			    version->min_fw_build_nn, version->min_fw_build_cw,
			    2000 + version->min_fw_build_yy);
		break;
	case 0x03:
		variant = "Firmware";
		break;
	default:
		bt_dev_err(hdev, "Unsupported image type (%02x)", version->img_type);
		return -EINVAL;
	}

	bt_dev_info(hdev, "%s timestamp %u.%u buildtype %u build %u", variant,
		    2000 + (version->timestamp >> 8), version->timestamp & 0xff,
		    version->build_type, version->build_num);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_version_info_tlv);

int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *version)
{
	struct sk_buff *skb;
	const u8 param[1] = { 0xFF };

	if (!version)
		return -EINVAL;

	skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	if (skb->data[0]) {
		bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
			   skb->data[0]);
		kfree_skb(skb);
		return -EIO;
	}

	/* Consume Command Complete Status field */
	skb_pull(skb, 1);

	/* Event parameters contain multiple TLVs. Read each of them and only
	 * keep the required data. The data is also mapped onto the existing
	 * legacy version fields such as hw_platform, hw_variant, and
	 * fw_variant so that the existing setup flow keeps working.
	 */
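	/* Each TLV entry is a struct intel_tlv: a one byte type, a one byte
	 * length and 'len' bytes of value, packed back to back. The loop
	 * below walks the event payload by advancing sizeof(*tlv) + tlv->len
	 * bytes per entry and copies only the fields this driver cares about.
	 */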
	while (skb->len) {
		struct intel_tlv *tlv;

		tlv = (struct intel_tlv *)skb->data;
		switch (tlv->type) {
		case INTEL_TLV_CNVI_TOP:
			version->cnvi_top = get_unaligned_le32(tlv->val);
			break;
		case INTEL_TLV_CNVR_TOP:
			version->cnvr_top = get_unaligned_le32(tlv->val);
			break;
		case INTEL_TLV_CNVI_BT:
			version->cnvi_bt = get_unaligned_le32(tlv->val);
			break;
		case INTEL_TLV_CNVR_BT:
			version->cnvr_bt = get_unaligned_le32(tlv->val);
			break;
		case INTEL_TLV_DEV_REV_ID:
			version->dev_rev_id = get_unaligned_le16(tlv->val);
			break;
		case INTEL_TLV_IMAGE_TYPE:
			version->img_type = tlv->val[0];
			break;
		case INTEL_TLV_TIME_STAMP:
			/* If image type is Operational firmware (0x03), then
			 * running FW Calendar Week and Year information can
			 * be extracted from Timestamp information
			 */
			version->min_fw_build_cw = tlv->val[0];
			version->min_fw_build_yy = tlv->val[1];
			version->timestamp = get_unaligned_le16(tlv->val);
			break;
		case INTEL_TLV_BUILD_TYPE:
			version->build_type = tlv->val[0];
			break;
		case INTEL_TLV_BUILD_NUM:
			/* If image type is Operational firmware (0x03), then
			 * running FW build number can be extracted from the
			 * Build information
			 */
			version->min_fw_build_nn = tlv->val[0];
			version->build_num = get_unaligned_le32(tlv->val);
			break;
		case INTEL_TLV_SECURE_BOOT:
			version->secure_boot = tlv->val[0];
			break;
		case INTEL_TLV_OTP_LOCK:
			version->otp_lock = tlv->val[0];
			break;
		case INTEL_TLV_API_LOCK:
			version->api_lock = tlv->val[0];
			break;
		case INTEL_TLV_DEBUG_LOCK:
			version->debug_lock = tlv->val[0];
			break;
		case INTEL_TLV_MIN_FW:
			version->min_fw_build_nn = tlv->val[0];
			version->min_fw_build_cw = tlv->val[1];
			version->min_fw_build_yy = tlv->val[2];
			break;
		case INTEL_TLV_LIMITED_CCE:
			version->limited_cce = tlv->val[0];
			break;
		case INTEL_TLV_SBE_TYPE:
			version->sbe_type = tlv->val[0];
			break;
		case INTEL_TLV_OTP_BDADDR:
			memcpy(&version->otp_bd_addr, tlv->val, tlv->len);
			break;
		default:
			/* Ignore rest of information */
			break;
		}
		/* consume the current tlv and move to next */
		skb_pull(skb, tlv->len + sizeof(*tlv));
	}

	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(btintel_read_version_tlv);

/* ------- REGMAP IBT SUPPORT ------- */

#define IBT_REG_MODE_8BIT  0x00
#define IBT_REG_MODE_16BIT 0x01
#define IBT_REG_MODE_32BIT 0x02

struct regmap_ibt_context {
	struct hci_dev *hdev;
	__u16 op_write;
	__u16 op_read;
};

struct ibt_cp_reg_access {
	__le32 addr;
	__u8   mode;
	__u8   len;
	__u8   data[];
} __packed;

struct ibt_rp_reg_access {
	__u8   status;
	__le32 addr;
	__u8   data[];
} __packed;

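/* Register accesses are tunnelled through vendor specific HCI commands.
 * A read sends a struct ibt_cp_reg_access (address, access mode, length)
 * using the read opcode and expects a struct ibt_rp_reg_access that echoes
 * the address followed by the value. A write appends the value bytes to the
 * struct ibt_cp_reg_access payload and uses the write opcode. The opcodes
 * themselves are chosen by the transport driver when it calls
 * btintel_regmap_init() below.
 */
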
static int regmap_ibt_read(void *context, const void *addr, size_t reg_size,
			   void *val, size_t val_size)
{
	struct regmap_ibt_context *ctx = context;
	struct ibt_cp_reg_access cp;
	struct ibt_rp_reg_access *rp;
	struct sk_buff *skb;
	int err = 0;

	if (reg_size != sizeof(__le32))
		return -EINVAL;

	switch (val_size) {
	case 1:
		cp.mode = IBT_REG_MODE_8BIT;
		break;
	case 2:
		cp.mode = IBT_REG_MODE_16BIT;
		break;
	case 4:
		cp.mode = IBT_REG_MODE_32BIT;
		break;
	default:
		return -EINVAL;
	}

	/* regmap provides a little-endian formatted addr */
	cp.addr = *(__le32 *)addr;
	cp.len = val_size;

	bt_dev_dbg(ctx->hdev, "Register (0x%x) read", le32_to_cpu(cp.addr));

	skb = hci_cmd_sync(ctx->hdev, ctx->op_read, sizeof(cp), &cp,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error (%d)",
			   le32_to_cpu(cp.addr), err);
		return err;
	}

	if (skb->len != sizeof(*rp) + val_size) {
		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad len",
			   le32_to_cpu(cp.addr));
		err = -EINVAL;
		goto done;
	}

	rp = (struct ibt_rp_reg_access *)skb->data;

	if (rp->addr != cp.addr) {
		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad addr",
			   le32_to_cpu(rp->addr));
		err = -EINVAL;
		goto done;
	}

	memcpy(val, rp->data, val_size);

done:
	kfree_skb(skb);
	return err;
}

static int regmap_ibt_gather_write(void *context,
				   const void *addr, size_t reg_size,
				   const void *val, size_t val_size)
{
	struct regmap_ibt_context *ctx = context;
	struct ibt_cp_reg_access *cp;
	struct sk_buff *skb;
	int plen = sizeof(*cp) + val_size;
	u8 mode;
	int err = 0;

	if (reg_size != sizeof(__le32))
		return -EINVAL;

	switch (val_size) {
	case 1:
		mode = IBT_REG_MODE_8BIT;
		break;
	case 2:
		mode = IBT_REG_MODE_16BIT;
		break;
	case 4:
		mode = IBT_REG_MODE_32BIT;
		break;
	default:
		return -EINVAL;
	}

	cp = kmalloc(plen, GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	/* regmap provides a little-endian formatted addr/value */
	cp->addr = *(__le32 *)addr;
	cp->mode = mode;
	cp->len = val_size;
	memcpy(&cp->data, val, val_size);

	bt_dev_dbg(ctx->hdev, "Register (0x%x) write", le32_to_cpu(cp->addr));

	skb = hci_cmd_sync(ctx->hdev, ctx->op_write, plen, cp, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) write error (%d)",
			   le32_to_cpu(cp->addr), err);
		goto done;
	}
	kfree_skb(skb);

done:
	kfree(cp);
	return err;
}

static int regmap_ibt_write(void *context, const void *data, size_t count)
{
	/* data contains register+value, since we only support 32bit addr,
	 * minimum data size is 4 bytes.
	 */
	if (WARN_ONCE(count < 4, "Invalid register access"))
		return -EINVAL;

	return regmap_ibt_gather_write(context, data, 4, data + 4, count - 4);
}

static void regmap_ibt_free_context(void *context)
{
	kfree(context);
}

static struct regmap_bus regmap_ibt = {
	.read = regmap_ibt_read,
	.write = regmap_ibt_write,
	.gather_write = regmap_ibt_gather_write,
	.free_context = regmap_ibt_free_context,
	.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};

/* Config is the same for all register regions */
static const struct regmap_config regmap_ibt_cfg = {
	.name     = "btintel_regmap",
	.reg_bits = 32,
	.val_bits = 32,
};

struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
				   u16 opcode_write)
{
	struct regmap_ibt_context *ctx;

	bt_dev_info(hdev, "regmap: Init R%x-W%x region", opcode_read,
		    opcode_write);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->op_read = opcode_read;
	ctx->op_write = opcode_write;
	ctx->hdev = hdev;

	return regmap_init(&hdev->dev, &regmap_ibt, ctx, &regmap_ibt_cfg);
}
EXPORT_SYMBOL_GPL(btintel_regmap_init);

int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param)
{
	struct intel_reset params = { 0x00, 0x01, 0x00, 0x01, 0x00000000 };
	struct sk_buff *skb;

	params.boot_param = cpu_to_le32(boot_param);

	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), &params,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Failed to send Intel Reset command");
		return PTR_ERR(skb);
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_send_intel_reset);

int btintel_read_boot_params(struct hci_dev *hdev,
			     struct intel_boot_params *params)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	if (skb->len != sizeof(*params)) {
		bt_dev_err(hdev, "Intel boot parameters size mismatch");
		kfree_skb(skb);
		return -EILSEQ;
	}

	memcpy(params, skb->data, sizeof(*params));

	kfree_skb(skb);

	if (params->status) {
		bt_dev_err(hdev, "Intel boot parameters command failed (%02x)",
			   params->status);
		return -bt_to_errno(params->status);
	}

	bt_dev_info(hdev, "Device revision is %u",
		    le16_to_cpu(params->dev_revid));

	bt_dev_info(hdev, "Secure boot is %s",
		    params->secure_boot ? "enabled" : "disabled");

	bt_dev_info(hdev, "OTP lock is %s",
		    params->otp_lock ? "enabled" : "disabled");

	bt_dev_info(hdev, "API lock is %s",
		    params->api_lock ? "enabled" : "disabled");

	bt_dev_info(hdev, "Debug lock is %s",
		    params->debug_lock ? "enabled" : "disabled");

	bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
		    params->min_fw_build_nn, params->min_fw_build_cw,
		    2000 + params->min_fw_build_yy);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_read_boot_params);

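/* Rough layout of the .sfi firmware files handled by the helpers below
 * (offsets in bytes, derived from the constants and secure send calls in
 * this file):
 *
 *   0   - 127   RSA CSS header
 *   128 - 383   RSA public key
 *   388 - 643   RSA signature
 *   644 - 963   optional ECDSA header on newer controllers: CSS header
 *               at 644, public key at 772, signature at 868
 *   then        command buffer (4 byte aligned stream of HCI commands)
 *
 * RSA_HEADER_LEN and ECDSA_HEADER_LEN give the header sizes used to locate
 * the start of the command buffer for the payload download.
 */
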
static int btintel_sfi_rsa_header_secure_send(struct hci_dev *hdev,
					      const struct firmware *fw)
{
	int err;

	/* Start the firmware download transaction with the Init fragment
	 * represented by the 128 bytes of CSS header.
	 */
	err = btintel_secure_send(hdev, 0x00, 128, fw->data);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
		goto done;
	}

	/* Send the 256 bytes of public key information from the firmware
	 * as the PKey fragment.
	 */
	err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err);
		goto done;
	}

	/* Send the 256 bytes of signature information from the firmware
	 * as the Sign fragment.
	 */
	err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send firmware signature (%d)", err);
		goto done;
	}

done:
	return err;
}

static int btintel_sfi_ecdsa_header_secure_send(struct hci_dev *hdev,
						const struct firmware *fw)
{
	int err;

	/* Start the firmware download transaction with the Init fragment
	 * represented by the 128 bytes of CSS header.
	 */
	err = btintel_secure_send(hdev, 0x00, 128, fw->data + 644);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
		return err;
	}

	/* Send the 96 bytes of public key information from the firmware
	 * as the PKey fragment.
	 */
	err = btintel_secure_send(hdev, 0x03, 96, fw->data + 644 + 128);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err);
		return err;
	}

	/* Send the 96 bytes of signature information from the firmware
	 * as the Sign fragment.
	 */
	err = btintel_secure_send(hdev, 0x02, 96, fw->data + 644 + 224);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send firmware signature (%d)",
			   err);
		return err;
	}
	return 0;
}

static int btintel_download_firmware_payload(struct hci_dev *hdev,
					     const struct firmware *fw,
					     size_t offset)
{
	int err;
	const u8 *fw_ptr;
	u32 frag_len;

	fw_ptr = fw->data + offset;
	frag_len = 0;
	err = -EINVAL;

	while (fw_ptr - fw->data < fw->size) {
		struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);

		frag_len += sizeof(*cmd) + cmd->plen;

		/* The parameter length of the secure send command requires
		 * a 4 byte alignment. It happens so that the firmware file
		 * contains proper Intel_NOP commands to align the fragments
		 * as needed.
		 *
		 * Send set of commands with 4 byte alignment from the
		 * firmware data buffer as a single Data fragment.
		 */
		if (!(frag_len % 4)) {
			err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
			if (err < 0) {
				bt_dev_err(hdev,
					   "Failed to send firmware data (%d)",
					   err);
				goto done;
			}

			fw_ptr += frag_len;
			frag_len = 0;
		}
	}

done:
	return err;
}

static bool btintel_firmware_version(struct hci_dev *hdev,
				     u8 num, u8 ww, u8 yy,
				     const struct firmware *fw,
				     u32 *boot_addr)
{
	const u8 *fw_ptr;

	fw_ptr = fw->data;

	while (fw_ptr - fw->data < fw->size) {
		struct hci_command_hdr *cmd = (void *)(fw_ptr);

		/* Each SKU has a different reset parameter to use in the
		 * HCI_Intel_Reset command and it is embedded in the firmware
		 * data. So, instead of using static value per SKU, check
		 * the firmware data and save it for later use.
		 */
		if (le16_to_cpu(cmd->opcode) == CMD_WRITE_BOOT_PARAMS) {
			struct cmd_write_boot_params *params;

			params = (void *)(fw_ptr + sizeof(*cmd));

			/* Save the boot address for the later Intel Reset */
			*boot_addr = le32_to_cpu(params->boot_addr);

			bt_dev_info(hdev, "Boot Address: 0x%x", *boot_addr);

			bt_dev_info(hdev, "Firmware Version: %u-%u.%u",
				    params->fw_build_num, params->fw_build_ww,
				    params->fw_build_yy);

			return (num == params->fw_build_num &&
				ww == params->fw_build_ww &&
				yy == params->fw_build_yy);
		}

		fw_ptr += sizeof(*cmd) + cmd->plen;
	}

	return false;
}

int btintel_download_firmware(struct hci_dev *hdev,
			      struct intel_version *ver,
			      const struct firmware *fw,
			      u32 *boot_param)
{
	int err;

	/* SfP and WsP don't seem to update the firmware version on file
	 * so version checking is currently not possible.
	 */
	switch (ver->hw_variant) {
	case 0x0b:	/* SfP */
	case 0x0c:	/* WsP */
		/* Skip version checking */
		break;
	default:
		/* Skip reading firmware file version in bootloader mode */
		if (ver->fw_variant == 0x06)
			break;

		/* Skip download if firmware has the same version */
		if (btintel_firmware_version(hdev, ver->fw_build_num,
					     ver->fw_build_ww, ver->fw_build_yy,
					     fw, boot_param)) {
			bt_dev_info(hdev, "Firmware already loaded");
			/* Return -EALREADY to indicate that the firmware has
			 * already been loaded.
			 */
			return -EALREADY;
		}
	}

	/* The firmware variant determines if the device is in bootloader
	 * mode or is running operational firmware. The value 0x06 identifies
	 * the bootloader and the value 0x23 identifies the operational
	 * firmware.
	 *
	 * If the firmware version has changed that means it needs to be reset
	 * to bootloader when operational so the new firmware can be loaded.
	 */
	if (ver->fw_variant == 0x23)
		return -EINVAL;

	err = btintel_sfi_rsa_header_secure_send(hdev, fw);
	if (err)
		return err;

	return btintel_download_firmware_payload(hdev, fw, RSA_HEADER_LEN);
}
EXPORT_SYMBOL_GPL(btintel_download_firmware);

int btintel_download_firmware_newgen(struct hci_dev *hdev,
				     struct intel_version_tlv *ver,
				     const struct firmware *fw, u32 *boot_param,
				     u8 hw_variant, u8 sbe_type)
{
	int err;
	u32 css_header_ver;

	/* Skip reading firmware file version in bootloader mode */
	if (ver->img_type != 0x01) {
		/* Skip download if firmware has the same version */
		if (btintel_firmware_version(hdev, ver->min_fw_build_nn,
					     ver->min_fw_build_cw,
					     ver->min_fw_build_yy,
					     fw, boot_param)) {
			bt_dev_info(hdev, "Firmware already loaded");
			/* Return -EALREADY to indicate that firmware has
			 * already been loaded.
			 */
			return -EALREADY;
		}
	}

	/* The firmware variant determines if the device is in bootloader
	 * mode or is running operational firmware. The value 0x01 identifies
	 * the bootloader and the value 0x03 identifies the operational
	 * firmware.
	 *
	 * If the firmware version has changed that means it needs to be reset
	 * to bootloader when operational so the new firmware can be loaded.
	 */
	if (ver->img_type == 0x03)
		return -EINVAL;

	/* iBT hardware variants 0x0b, 0x0c, 0x11, 0x12, 0x13, 0x14 support
	 * only the RSA secure boot engine. Hence, the corresponding sfi file
	 * will have an RSA header of 644 bytes followed by the Command Buffer.
	 *
	 * iBT hardware variants 0x17, 0x18 onwards support both RSA and ECDSA
	 * secure boot engines. As a result, the corresponding sfi file will
	 * have an RSA header of 644 bytes, an ECDSA header of 320 bytes and
	 * then the Command Buffer.
	 *
	 * CSS Header byte positions 0x08 to 0x0B represent the CSS Header
	 * version: RSA (0x00010000), ECDSA (0x00020000)
	 */
	css_header_ver = get_unaligned_le32(fw->data + CSS_HEADER_OFFSET);
	if (css_header_ver != 0x00010000) {
		bt_dev_err(hdev, "Invalid CSS Header version");
		return -EINVAL;
	}

	if (hw_variant <= 0x14) {
		if (sbe_type != 0x00) {
			bt_dev_err(hdev, "Invalid SBE type for hardware variant (%d)",
				   hw_variant);
			return -EINVAL;
		}

		err = btintel_sfi_rsa_header_secure_send(hdev, fw);
		if (err)
			return err;

		err = btintel_download_firmware_payload(hdev, fw, RSA_HEADER_LEN);
		if (err)
			return err;
	} else if (hw_variant >= 0x17) {
		/* Check if CSS header for ECDSA follows the RSA header */
		if (fw->data[ECDSA_OFFSET] != 0x06)
			return -EINVAL;

		/* Check if the CSS Header version is ECDSA(0x00020000) */
		css_header_ver = get_unaligned_le32(fw->data + ECDSA_OFFSET + CSS_HEADER_OFFSET);
		if (css_header_ver != 0x00020000) {
			bt_dev_err(hdev, "Invalid CSS Header version");
			return -EINVAL;
		}

		if (sbe_type == 0x00) {
			err = btintel_sfi_rsa_header_secure_send(hdev, fw);
			if (err)
				return err;

			err = btintel_download_firmware_payload(hdev, fw,
								RSA_HEADER_LEN + ECDSA_HEADER_LEN);
			if (err)
				return err;
		} else if (sbe_type == 0x01) {
			err = btintel_sfi_ecdsa_header_secure_send(hdev, fw);
			if (err)
				return err;

			err = btintel_download_firmware_payload(hdev, fw,
								RSA_HEADER_LEN + ECDSA_HEADER_LEN);
			if (err)
				return err;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(btintel_download_firmware_newgen);

void btintel_reset_to_bootloader(struct hci_dev *hdev)
{
	struct intel_reset params;
	struct sk_buff *skb;

	/* Send Intel Reset command. This will result in
	 * re-enumeration of the BT controller.
	 *
	 * Intel Reset parameter description:
	 * reset_type :   0x00 (Soft reset),
	 *                0x01 (Hard reset)
	 * patch_enable : 0x00 (Do not enable),
	 *                0x01 (Enable)
	 * ddc_reload :   0x00 (Do not reload),
	 *                0x01 (Reload)
	 * boot_option:   0x00 (Current image),
	 *                0x01 (Specified boot address)
	 * boot_param:    Boot address
	 */
	params.reset_type = 0x01;
	params.patch_enable = 0x01;
	params.ddc_reload = 0x01;
	params.boot_option = 0x00;
	params.boot_param = cpu_to_le32(0x00000000);

	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params),
			     &params, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "FW download error recovery failed (%ld)",
			   PTR_ERR(skb));
		return;
	}
	bt_dev_info(hdev, "Intel reset sent to retry FW download");
	kfree_skb(skb);

	/* Current Intel BT controllers (ThP/JfP) hold the USB reset
	 * lines for 2ms when they receive Intel Reset in bootloader mode.
	 * Whereas, the upcoming Intel BT controllers will hold USB reset
	 * for 150ms. To keep the delay generic, 150ms is chosen here.
	 */
	msleep(150);
}
EXPORT_SYMBOL_GPL(btintel_reset_to_bootloader);

int btintel_read_debug_features(struct hci_dev *hdev,
				struct intel_debug_features *features)
{
	struct sk_buff *skb;
	u8 page_no = 1;

	/* The Intel controller supports two pages of feature bits. Each page
	 * is a 128-bit mask and each bit flags support for a specific feature.
	 */
	skb = __hci_cmd_sync(hdev, 0xfca6, sizeof(page_no), &page_no,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading supported features failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	if (skb->len != (sizeof(features->page1) + 3)) {
		bt_dev_err(hdev, "Supported features event size mismatch");
		kfree_skb(skb);
		return -EILSEQ;
	}

	memcpy(features->page1, skb->data + 3, sizeof(features->page1));

	/* Read the supported features page 2 here if it is required in the
	 * future.
	 */
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(btintel_read_debug_features);

int btintel_set_debug_features(struct hci_dev *hdev,
			       const struct intel_debug_features *features)
{
	u8 mask[11] = { 0x0a, 0x92, 0x02, 0x07, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00 };
	struct sk_buff *skb;

	if (!features)
		return -EINVAL;

	if (!(features->page1[0] & 0x3f)) {
		bt_dev_info(hdev, "Telemetry exception format not supported");
		return 0;
	}

	skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_debug_features);

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("intel/ibt-11-5.sfi");
MODULE_FIRMWARE("intel/ibt-11-5.ddc");
MODULE_FIRMWARE("intel/ibt-12-16.sfi");
MODULE_FIRMWARE("intel/ibt-12-16.ddc");