// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus interface code
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#include <linux/delay.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

#define GB_INTERFACE_MODE_SWITCH_TIMEOUT	2000

#define GB_INTERFACE_DEVICE_ID_BAD	0xff

#define GB_INTERFACE_AUTOSUSPEND_MS	3000

/* Time required for interface to enter standby before disabling REFCLK */
#define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS	20

/* Don't-care selector index */
#define DME_SELECTOR_INDEX_NULL		0

/* DME attributes */
/* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */
#define DME_T_TST_SRC_INCREMENT		0x4083

#define DME_DDBL1_MANUFACTURERID	0x5003
#define DME_DDBL1_PRODUCTID		0x5004

#define DME_TOSHIBA_GMP_VID		0x6000
#define DME_TOSHIBA_GMP_PID		0x6001
#define DME_TOSHIBA_GMP_SN0		0x6002
#define DME_TOSHIBA_GMP_SN1		0x6003
#define DME_TOSHIBA_GMP_INIT_STATUS	0x6101

/* DDBL1 Manufacturer and Product ids */
#define TOSHIBA_DMID			0x0126
#define TOSHIBA_ES2_BRIDGE_DPID		0x1000
#define TOSHIBA_ES3_APBRIDGE_DPID	0x1001
#define TOSHIBA_ES3_GBPHY_DPID		0x1002

static int gb_interface_hibernate_link(struct gb_interface *intf);
static int gb_interface_refclk_set(struct gb_interface *intf, bool enable);

static int gb_interface_dme_attr_get(struct gb_interface *intf,
				     u16 attr, u32 *val)
{
	return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id,
				   attr, DME_SELECTOR_INDEX_NULL, val);
}

static int gb_interface_read_ara_dme(struct gb_interface *intf)
{
	u32 sn0, sn1;
	int ret;

	/*
	 * Unless this is a Toshiba bridge, bail out until we have defined
	 * standard GMP attributes.
	 */
	if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) {
		dev_err(&intf->dev, "unknown manufacturer %08x\n",
			intf->ddbl1_manufacturer_id);
		return -ENODEV;
	}

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID,
					&intf->vendor_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID,
					&intf->product_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1);
	if (ret)
		return ret;

	intf->serial_number = (u64)sn1 << 32 | sn0;

	return 0;
}

static int gb_interface_read_dme(struct gb_interface *intf)
{
	int ret;

	/* DME attributes have already been read */
	if (intf->dme_read)
		return 0;

	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID,
					&intf->ddbl1_manufacturer_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID,
					&intf->ddbl1_product_id);
	if (ret)
		return ret;

	if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID &&
	    intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) {
		intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS;
		intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS;
	}

	ret = gb_interface_read_ara_dme(intf);
	if (ret)
		return ret;

	intf->dme_read = true;

	return 0;
}

static int gb_interface_route_create(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 intf_id = intf->interface_id;
	u8 device_id;
	int ret;

	/* Allocate an interface device id. */
	ret = ida_simple_get(&svc->device_id_map,
			     GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
			     GFP_KERNEL);
	if (ret < 0) {
		dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
		return ret;
	}
	device_id = ret;

	ret = gb_svc_intf_device_id(svc, intf_id, device_id);
	if (ret) {
		dev_err(&intf->dev, "failed to set device id %u: %d\n",
			device_id, ret);
		goto err_ida_remove;
	}

	/* FIXME: Hard-coded AP device id. */
	ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP,
				  intf_id, device_id);
	if (ret) {
		dev_err(&intf->dev, "failed to create route: %d\n", ret);
		goto err_svc_id_free;
	}

	intf->device_id = device_id;

	return 0;

err_svc_id_free:
	/*
	 * XXX Should we tell SVC that this id doesn't belong to interface
	 * XXX anymore.
	 */
err_ida_remove:
	ida_simple_remove(&svc->device_id_map, device_id);

	return ret;
}

static void gb_interface_route_destroy(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;

	if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD)
		return;

	gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
	ida_simple_remove(&svc->device_id_map, intf->device_id);
	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
}

/* Locking: Caller holds the interface mutex. */
static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
{
	int ret;

	dev_info(&intf->dev, "legacy mode switch detected\n");

	/* Mark as disconnected to prevent I/O during disable. */
	intf->disconnected = true;
	gb_interface_disable(intf);
	intf->disconnected = false;

	ret = gb_interface_enable(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
		gb_interface_deactivate(intf);
	}

	return ret;
}

void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
				u32 mailbox)
{
	mutex_lock(&intf->mutex);

	if (result) {
		dev_warn(&intf->dev,
			 "mailbox event with UniPro error: 0x%04x\n",
			 result);
		goto err_disable;
	}

	if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
		dev_warn(&intf->dev,
			 "mailbox event with unexpected value: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
		gb_interface_legacy_mode_switch(intf);
		goto out_unlock;
	}

	if (!intf->mode_switch) {
		dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	dev_info(&intf->dev, "mode switch detected\n");

	complete(&intf->mode_switch_completion);

out_unlock:
	mutex_unlock(&intf->mutex);

	return;

err_disable:
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}

static void gb_interface_mode_switch_work(struct work_struct *work)
{
	struct gb_interface *intf;
	struct gb_control *control;
	unsigned long timeout;
	int ret;

	intf = container_of(work, struct gb_interface, mode_switch_work);

	mutex_lock(&intf->mutex);
	/* Make sure interface is still enabled. */
	if (!intf->enabled) {
		dev_dbg(&intf->dev, "mode switch aborted\n");
		intf->mode_switch = false;
		mutex_unlock(&intf->mutex);
		goto out_interface_put;
	}

	/*
	 * Prepare the control device for mode switch and make sure to get an
	 * extra reference before it goes away during interface disable.
	 */
	control = gb_control_get(intf->control);
	gb_control_mode_switch_prepare(control);
	gb_interface_disable(intf);
	mutex_unlock(&intf->mutex);

	timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
	ret = wait_for_completion_interruptible_timeout(
			&intf->mode_switch_completion, timeout);

	/* Finalise control-connection mode switch. */
	gb_control_mode_switch_complete(control);
	gb_control_put(control);

	if (ret < 0) {
		dev_err(&intf->dev, "mode switch interrupted\n");
		goto err_deactivate;
	} else if (ret == 0) {
		dev_err(&intf->dev, "mode switch timed out\n");
		goto err_deactivate;
	}

	/* Re-enable (re-enumerate) interface if still active. */
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	if (intf->active) {
		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev, "failed to re-enable interface: %d\n",
				ret);
			gb_interface_deactivate(intf);
		}
	}
	mutex_unlock(&intf->mutex);

out_interface_put:
	gb_interface_put(intf);

	return;

err_deactivate:
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);

	gb_interface_put(intf);
}

int gb_interface_request_mode_switch(struct gb_interface *intf)
{
	int ret = 0;

	mutex_lock(&intf->mutex);
	if (intf->mode_switch) {
		ret = -EBUSY;
		goto out_unlock;
	}

	intf->mode_switch = true;
	reinit_completion(&intf->mode_switch_completion);

	/*
	 * Get a reference to the interface device, which will be put once the
	 * mode switch is complete.
	 */
	get_device(&intf->dev);

	if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
		put_device(&intf->dev);
		ret = -EBUSY;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&intf->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);

/*
 * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
 * init-status attribute DME_TOSHIBA_GMP_INIT_STATUS. The AP needs to read
 * and clear it after reading a non-zero value from it.
 *
 * FIXME: This is module-hardware dependent and needs to be extended for every
 * type of module we want to support.
 */
static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
{
	struct gb_host_device *hd = intf->hd;
	unsigned long bootrom_quirks;
	unsigned long s2l_quirks;
	int ret;
	u32 value;
	u16 attr;
	u8 init_status;

	/*
	 * ES2 bridges use T_TstSrcIncrement for the init status.
	 *
	 * FIXME: Remove ES2 support
	 */
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		attr = DME_T_TST_SRC_INCREMENT;
	else
		attr = DME_TOSHIBA_GMP_INIT_STATUS;

	ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
				  DME_SELECTOR_INDEX_NULL, &value);
	if (ret)
		return ret;

	/*
	 * A nonzero init status indicates the module has finished
	 * initializing.
	 */
	if (!value) {
		dev_err(&intf->dev, "invalid init status\n");
		return -ENODEV;
	}

	/*
	 * Extract the init status.
	 *
	 * For ES2: We need to check lowest 8 bits of 'value'.
	 * For ES3: We need to check highest 8 bits out of 32 of 'value'.
	 *
	 * FIXME: Remove ES2 support
	 */
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		init_status = value & 0xff;
	else
		init_status = value >> 24;

	/*
	 * Check if the interface is executing the quirky ES3 bootrom that,
	 * for example, requires E2EFC, CSD and CSV to be disabled.
	 */
	bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
				GB_INTERFACE_QUIRK_FORCED_DISABLE |
				GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH |
				GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE;

	s2l_quirks = GB_INTERFACE_QUIRK_NO_PM;

	switch (init_status) {
	case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
	case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
		intf->quirks |= bootrom_quirks;
		break;
	case GB_INIT_S2_LOADER_BOOT_STARTED:
		/* S2 Loader doesn't support runtime PM */
		intf->quirks &= ~bootrom_quirks;
		intf->quirks |= s2l_quirks;
		break;
	default:
		intf->quirks &= ~bootrom_quirks;
		intf->quirks &= ~s2l_quirks;
	}

	/* Clear the init status. */
	return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
				   DME_SELECTOR_INDEX_NULL, 0);
}

/* interface sysfs attributes */
#define gb_interface_attr(field, type)					\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_interface *intf = to_gb_interface(dev);		\
	return scnprintf(buf, PAGE_SIZE, type"\n", intf->field);	\
}									\
static DEVICE_ATTR_RO(field)

gb_interface_attr(ddbl1_manufacturer_id, "0x%08x");
gb_interface_attr(ddbl1_product_id, "0x%08x");
gb_interface_attr(interface_id, "%u");
gb_interface_attr(vendor_id, "0x%08x");
gb_interface_attr(product_id, "0x%08x");
gb_interface_attr(serial_number, "0x%016llx");

static ssize_t voltage_now_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_VOL,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(voltage_now);

static ssize_t current_now_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_CURR,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get current sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(current_now);

static ssize_t power_now_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_PWR,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get power sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(power_now);

static ssize_t power_state_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);

	if (intf->active)
		return scnprintf(buf, PAGE_SIZE, "on\n");
	else
		return scnprintf(buf, PAGE_SIZE, "off\n");
}

static ssize_t power_state_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t len)
{
	struct gb_interface *intf = to_gb_interface(dev);
	bool activate;
	int ret = 0;

	if (kstrtobool(buf, &activate))
		return -EINVAL;

	mutex_lock(&intf->mutex);

	if (activate == intf->active)
		goto unlock;

	if (activate) {
		ret = gb_interface_activate(intf);
		if (ret) {
			dev_err(&intf->dev,
				"failed to activate interface: %d\n", ret);
			goto unlock;
		}

		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev,
				"failed to enable interface: %d\n", ret);
			gb_interface_deactivate(intf);
			goto unlock;
		}
	} else {
		gb_interface_disable(intf);
		gb_interface_deactivate(intf);
	}

unlock:
	mutex_unlock(&intf->mutex);

	if (ret)
		return ret;

	return len;
}
static DEVICE_ATTR_RW(power_state);

static const char *gb_interface_type_string(struct gb_interface *intf)
{
	static const char * const types[] = {
		[GB_INTERFACE_TYPE_INVALID] = "invalid",
		[GB_INTERFACE_TYPE_UNKNOWN] = "unknown",
		[GB_INTERFACE_TYPE_DUMMY] = "dummy",
		[GB_INTERFACE_TYPE_UNIPRO] = "unipro",
		[GB_INTERFACE_TYPE_GREYBUS] = "greybus",
	};

	return types[intf->type];
}

static ssize_t interface_type_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);

	return sprintf(buf, "%s\n", gb_interface_type_string(intf));
}
static DEVICE_ATTR_RO(interface_type);

static struct attribute *interface_unipro_attrs[] = {
	&dev_attr_ddbl1_manufacturer_id.attr,
	&dev_attr_ddbl1_product_id.attr,
	NULL
};

static struct attribute *interface_greybus_attrs[] = {
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_serial_number.attr,
	NULL
};

static struct attribute *interface_power_attrs[] = {
	&dev_attr_voltage_now.attr,
	&dev_attr_current_now.attr,
	&dev_attr_power_now.attr,
	&dev_attr_power_state.attr,
	NULL
};

static struct attribute *interface_common_attrs[] = {
	&dev_attr_interface_id.attr,
	&dev_attr_interface_type.attr,
	NULL
};

static umode_t interface_unipro_is_visible(struct kobject *kobj,
					   struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_UNIPRO:
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static umode_t interface_greybus_is_visible(struct kobject *kobj,
					    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static umode_t interface_power_is_visible(struct kobject *kobj,
					  struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_UNIPRO:
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static const struct attribute_group interface_unipro_group = {
	.is_visible	= interface_unipro_is_visible,
	.attrs		= interface_unipro_attrs,
};

static const struct attribute_group interface_greybus_group = {
	.is_visible	= interface_greybus_is_visible,
	.attrs		= interface_greybus_attrs,
};

static const struct attribute_group interface_power_group = {
	.is_visible	= interface_power_is_visible,
	.attrs		= interface_power_attrs,
};

static const struct attribute_group interface_common_group = {
	.attrs		= interface_common_attrs,
};

static const struct attribute_group *interface_groups[] = {
	&interface_unipro_group,
	&interface_greybus_group,
	&interface_power_group,
	&interface_common_group,
	NULL
};

static void gb_interface_release(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);

	trace_gb_interface_release(intf);

	cancel_work_sync(&intf->mode_switch_work);
	kfree(intf);
}

#ifdef CONFIG_PM
static int gb_interface_suspend(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;

	ret = gb_control_interface_suspend_prepare(intf->control);
	if (ret)
		return ret;

	ret = gb_control_suspend(intf->control);
	if (ret)
		goto err_hibernate_abort;

	ret = gb_interface_hibernate_link(intf);
	if (ret)
		return ret;

	/* Delay to allow interface to enter standby before disabling refclk */
	msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS);

	ret = gb_interface_refclk_set(intf, false);
	if (ret)
		return ret;

	return 0;

err_hibernate_abort:
	gb_control_interface_hibernate_abort(intf->control);

	return ret;
}

static int gb_interface_resume(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		return ret;

	ret = gb_svc_intf_resume(svc, intf->interface_id);
	if (ret)
		return ret;

	ret = gb_control_resume(intf->control);
	if (ret)
		return ret;

	return 0;
}

static int gb_interface_runtime_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
#endif

static const struct dev_pm_ops gb_interface_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume,
			   gb_interface_runtime_idle)
};

struct device_type greybus_interface_type = {
	.name =		"greybus_interface",
	.release =	gb_interface_release,
	.pm =		&gb_interface_pm_ops,
};

/*
 * A Greybus module represents a user-replaceable component on a GMP
 * phone. An interface is the physical connection on that module. A
 * module may have more than one interface.
 *
 * Create a gb_interface structure to represent a discovered interface.
 * The position of the interface within the Endo is encoded in the
 * "interface_id" argument.
 *
 * Returns a pointer to the new interface or a null pointer if a
 * failure occurs due to memory exhaustion.
 */
struct gb_interface *gb_interface_create(struct gb_module *module,
					 u8 interface_id)
{
	struct gb_host_device *hd = module->hd;
	struct gb_interface *intf;

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return NULL;

	intf->hd = hd;		/* XXX refcount? */
	intf->module = module;
	intf->interface_id = interface_id;
	INIT_LIST_HEAD(&intf->bundles);
	INIT_LIST_HEAD(&intf->manifest_descs);
	mutex_init(&intf->mutex);
	INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
	init_completion(&intf->mode_switch_completion);

	/* Invalid device id to start with */
	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;

	intf->dev.parent = &module->dev;
	intf->dev.bus = &greybus_bus_type;
	intf->dev.type = &greybus_interface_type;
	intf->dev.groups = interface_groups;
	intf->dev.dma_mask = module->dev.dma_mask;
	device_initialize(&intf->dev);
	dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev),
		     interface_id);

	pm_runtime_set_autosuspend_delay(&intf->dev,
					 GB_INTERFACE_AUTOSUSPEND_MS);

	trace_gb_interface_create(intf);

	return intf;
}

static int gb_interface_vsys_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set v_sys: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_refclk_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set refclk: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_unipro_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set UniPro: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_activate_operation(struct gb_interface *intf,
					   enum gb_interface_type *intf_type)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 type;
	int ret;

	dev_dbg(&intf->dev, "%s\n", __func__);

	ret = gb_svc_intf_activate(svc, intf->interface_id, &type);
	if (ret) {
		dev_err(&intf->dev, "failed to activate: %d\n", ret);
		return ret;
	}

	switch (type) {
	case GB_SVC_INTF_TYPE_DUMMY:
		*intf_type = GB_INTERFACE_TYPE_DUMMY;
		/* FIXME: handle as an error for now */
		return -ENODEV;
	case GB_SVC_INTF_TYPE_UNIPRO:
		*intf_type = GB_INTERFACE_TYPE_UNIPRO;
		dev_err(&intf->dev, "interface type UniPro not supported\n");
		/* FIXME: handle as an error for now */
		return -ENODEV;
	case GB_SVC_INTF_TYPE_GREYBUS:
		*intf_type = GB_INTERFACE_TYPE_GREYBUS;
		break;
	default:
		dev_err(&intf->dev, "unknown interface type: %u\n", type);
		*intf_type = GB_INTERFACE_TYPE_UNKNOWN;
		return -ENODEV;
	}

	return 0;
}

static int gb_interface_hibernate_link(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;

	return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id);
}

static int _gb_interface_activate(struct gb_interface *intf,
				  enum gb_interface_type *type)
{
	int ret;

	*type = GB_INTERFACE_TYPE_UNKNOWN;

	if (intf->ejected || intf->removed)
		return -ENODEV;

	ret = gb_interface_vsys_set(intf, true);
	if (ret)
		return ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		goto err_vsys_disable;

	ret = gb_interface_unipro_set(intf, true);
	if (ret)
		goto err_refclk_disable;

	ret = gb_interface_activate_operation(intf, type);
	if (ret) {
		switch (*type) {
		case GB_INTERFACE_TYPE_UNIPRO:
		case GB_INTERFACE_TYPE_GREYBUS:
			goto err_hibernate_link;
		default:
			goto err_unipro_disable;
		}
	}

	ret = gb_interface_read_dme(intf);
	if (ret)
		goto err_hibernate_link;

	ret = gb_interface_route_create(intf);
	if (ret)
		goto err_hibernate_link;

	intf->active = true;

	trace_gb_interface_activate(intf);

	return 0;

err_hibernate_link:
	gb_interface_hibernate_link(intf);
err_unipro_disable:
	gb_interface_unipro_set(intf, false);
err_refclk_disable:
	gb_interface_refclk_set(intf, false);
err_vsys_disable:
	gb_interface_vsys_set(intf, false);

	return ret;
}

/*
 * At present, we assume a UniPro-only module to be a Greybus module that
 * failed to send its mailbox poke. There is some reason to believe that this
 * is because of a bug in the ES3 bootrom.
 *
 * FIXME: Check if this is a Toshiba bridge before retrying?
 */
static int _gb_interface_activate_es3_hack(struct gb_interface *intf,
					   enum gb_interface_type *type)
{
	int retries = 3;
	int ret;

	while (retries--) {
		ret = _gb_interface_activate(intf, type);
		if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO)
			continue;

		break;
	}

	return ret;
}

/*
 * Activate an interface.
 *
 * Locking: Caller holds the interface mutex.
 */
int gb_interface_activate(struct gb_interface *intf)
{
	enum gb_interface_type type;
	int ret;

	switch (intf->type) {
	case GB_INTERFACE_TYPE_INVALID:
	case GB_INTERFACE_TYPE_GREYBUS:
		ret = _gb_interface_activate_es3_hack(intf, &type);
		break;
	default:
		ret = _gb_interface_activate(intf, &type);
	}

	/* Make sure type is detected correctly during reactivation. */
	if (intf->type != GB_INTERFACE_TYPE_INVALID) {
		if (type != intf->type) {
			dev_err(&intf->dev, "failed to detect interface type\n");

			if (!ret)
				gb_interface_deactivate(intf);

			return -EIO;
		}
	} else {
		intf->type = type;
	}

	return ret;
}

/*
 * Deactivate an interface.
 *
 * Locking: Caller holds the interface mutex.
 */
void gb_interface_deactivate(struct gb_interface *intf)
{
	if (!intf->active)
		return;

	trace_gb_interface_deactivate(intf);

	/* Abort any ongoing mode switch. */
	if (intf->mode_switch)
		complete(&intf->mode_switch_completion);

	gb_interface_route_destroy(intf);
	gb_interface_hibernate_link(intf);
	gb_interface_unipro_set(intf, false);
	gb_interface_refclk_set(intf, false);
	gb_interface_vsys_set(intf, false);

	intf->active = false;
}

/*
 * Enable an interface by enabling its control connection, fetching the
 * manifest and other information over it, and finally registering its child
 * devices.
 *
 * Locking: Caller holds the interface mutex.
 */
int gb_interface_enable(struct gb_interface *intf)
{
	struct gb_control *control;
	struct gb_bundle *bundle, *tmp;
	int ret, size;
	void *manifest;

	ret = gb_interface_read_and_clear_init_status(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to clear init status: %d\n", ret);
		return ret;
	}

	/* Establish control connection */
	control = gb_control_create(intf);
	if (IS_ERR(control)) {
		dev_err(&intf->dev, "failed to create control device: %ld\n",
			PTR_ERR(control));
		return PTR_ERR(control);
	}
	intf->control = control;

	ret = gb_control_enable(intf->control);
	if (ret)
		goto err_put_control;

	/* Get manifest size using control protocol on CPort */
	size = gb_control_get_manifest_size_operation(intf);
	if (size <= 0) {
		dev_err(&intf->dev, "failed to get manifest size: %d\n", size);

		if (size)
			ret = size;
		else
			ret = -EINVAL;

		goto err_disable_control;
	}

	manifest = kmalloc(size, GFP_KERNEL);
	if (!manifest) {
		ret = -ENOMEM;
		goto err_disable_control;
	}

	/* Get manifest using control protocol on CPort */
	ret = gb_control_get_manifest_operation(intf, manifest, size);
	if (ret) {
		dev_err(&intf->dev, "failed to get manifest: %d\n", ret);
		goto err_free_manifest;
	}

	/*
	 * Parse the manifest and build up our data structures representing
	 * what's in it.
	 */
	if (!gb_manifest_parse(intf, manifest, size)) {
		dev_err(&intf->dev, "failed to parse manifest\n");
		ret = -EINVAL;
		goto err_destroy_bundles;
	}

	ret = gb_control_get_bundle_versions(intf->control);
	if (ret)
		goto err_destroy_bundles;

	/* Register the control device and any bundles */
	ret = gb_control_add(intf->control);
	if (ret)
		goto err_destroy_bundles;

	pm_runtime_use_autosuspend(&intf->dev);
	pm_runtime_get_noresume(&intf->dev);
	pm_runtime_set_active(&intf->dev);
	pm_runtime_enable(&intf->dev);

	list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
		ret = gb_bundle_add(bundle);
		if (ret) {
			gb_bundle_destroy(bundle);
			continue;
		}
	}

	kfree(manifest);

	intf->enabled = true;

	pm_runtime_put(&intf->dev);

	trace_gb_interface_enable(intf);

	return 0;

err_destroy_bundles:
	list_for_each_entry_safe(bundle, tmp, &intf->bundles, links)
		gb_bundle_destroy(bundle);
err_free_manifest:
	kfree(manifest);
err_disable_control:
	gb_control_disable(intf->control);
err_put_control:
	gb_control_put(intf->control);
	intf->control = NULL;

	return ret;
}

/*
 * Disable an interface and destroy its bundles.
 *
 * Locking: Caller holds the interface mutex.
 */
void gb_interface_disable(struct gb_interface *intf)
{
	struct gb_bundle *bundle;
	struct gb_bundle *next;

	if (!intf->enabled)
		return;

	trace_gb_interface_disable(intf);

	pm_runtime_get_sync(&intf->dev);

	/* Set disconnected flag to avoid I/O during connection tear down. */
	if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE)
		intf->disconnected = true;

	list_for_each_entry_safe(bundle, next, &intf->bundles, links)
		gb_bundle_destroy(bundle);

	if (!intf->mode_switch && !intf->disconnected)
		gb_control_interface_deactivate_prepare(intf->control);

	gb_control_del(intf->control);
	gb_control_disable(intf->control);
	gb_control_put(intf->control);
	intf->control = NULL;

	intf->enabled = false;

	pm_runtime_disable(&intf->dev);
	pm_runtime_set_suspended(&intf->dev);
	pm_runtime_dont_use_autosuspend(&intf->dev);
	pm_runtime_put_noidle(&intf->dev);
}

/* Register an interface. */
int gb_interface_add(struct gb_interface *intf)
{
	int ret;

	ret = device_add(&intf->dev);
	if (ret) {
		dev_err(&intf->dev, "failed to register interface: %d\n", ret);
		return ret;
	}

	trace_gb_interface_add(intf);

	dev_info(&intf->dev, "Interface added (%s)\n",
		 gb_interface_type_string(intf));

	switch (intf->type) {
	case GB_INTERFACE_TYPE_GREYBUS:
		dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
			 intf->vendor_id, intf->product_id);
		fallthrough;
	case GB_INTERFACE_TYPE_UNIPRO:
		dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
			 intf->ddbl1_manufacturer_id,
			 intf->ddbl1_product_id);
		break;
	default:
		break;
	}

	return 0;
}

/* Deregister an interface. */
void gb_interface_del(struct gb_interface *intf)
{
	if (device_is_registered(&intf->dev)) {
		trace_gb_interface_del(intf);

		device_del(&intf->dev);
		dev_info(&intf->dev, "Interface removed\n");
	}
}

void gb_interface_put(struct gb_interface *intf)
{
	put_device(&intf->dev);
}