1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * SVC Greybus driver. 4 * 5 * Copyright 2015 Google Inc. 6 * Copyright 2015 Linaro Ltd. 7 */ 8 9 #include <linux/debugfs.h> 10 #include <linux/workqueue.h> 11 #include <linux/greybus.h> 12 13 #define SVC_INTF_EJECT_TIMEOUT 9000 14 #define SVC_INTF_ACTIVATE_TIMEOUT 6000 15 #define SVC_INTF_RESUME_TIMEOUT 3000 16 17 struct gb_svc_deferred_request { 18 struct work_struct work; 19 struct gb_operation *operation; 20 }; 21 22 static int gb_svc_queue_deferred_request(struct gb_operation *operation); 23 24 static ssize_t endo_id_show(struct device *dev, 25 struct device_attribute *attr, char *buf) 26 { 27 struct gb_svc *svc = to_gb_svc(dev); 28 29 return sprintf(buf, "0x%04x\n", svc->endo_id); 30 } 31 static DEVICE_ATTR_RO(endo_id); 32 33 static ssize_t ap_intf_id_show(struct device *dev, 34 struct device_attribute *attr, char *buf) 35 { 36 struct gb_svc *svc = to_gb_svc(dev); 37 38 return sprintf(buf, "%u\n", svc->ap_intf_id); 39 } 40 static DEVICE_ATTR_RO(ap_intf_id); 41 42 // FIXME 43 // This is a hack, we need to do this "right" and clean the interface up 44 // properly, not just forcibly yank the thing out of the system and hope for the 45 // best. But for now, people want their modules to come out without having to 46 // throw the thing to the ground or get out a screwdriver. 47 static ssize_t intf_eject_store(struct device *dev, 48 struct device_attribute *attr, const char *buf, 49 size_t len) 50 { 51 struct gb_svc *svc = to_gb_svc(dev); 52 unsigned short intf_id; 53 int ret; 54 55 ret = kstrtou16(buf, 10, &intf_id); 56 if (ret < 0) 57 return ret; 58 59 dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id); 60 61 ret = gb_svc_intf_eject(svc, intf_id); 62 if (ret < 0) 63 return ret; 64 65 return len; 66 } 67 static DEVICE_ATTR_WO(intf_eject); 68 69 static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr, 70 char *buf) 71 { 72 struct gb_svc *svc = to_gb_svc(dev); 73 74 return sprintf(buf, "%s\n", 75 gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled"); 76 } 77 78 static ssize_t watchdog_store(struct device *dev, 79 struct device_attribute *attr, const char *buf, 80 size_t len) 81 { 82 struct gb_svc *svc = to_gb_svc(dev); 83 int retval; 84 bool user_request; 85 86 retval = strtobool(buf, &user_request); 87 if (retval) 88 return retval; 89 90 if (user_request) 91 retval = gb_svc_watchdog_enable(svc); 92 else 93 retval = gb_svc_watchdog_disable(svc); 94 if (retval) 95 return retval; 96 return len; 97 } 98 static DEVICE_ATTR_RW(watchdog); 99 100 static ssize_t watchdog_action_show(struct device *dev, 101 struct device_attribute *attr, char *buf) 102 { 103 struct gb_svc *svc = to_gb_svc(dev); 104 105 if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) 106 return sprintf(buf, "panic\n"); 107 else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) 108 return sprintf(buf, "reset\n"); 109 110 return -EINVAL; 111 } 112 113 static ssize_t watchdog_action_store(struct device *dev, 114 struct device_attribute *attr, 115 const char *buf, size_t len) 116 { 117 struct gb_svc *svc = to_gb_svc(dev); 118 119 if (sysfs_streq(buf, "panic")) 120 svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL; 121 else if (sysfs_streq(buf, "reset")) 122 svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO; 123 else 124 return -EINVAL; 125 126 return len; 127 } 128 static DEVICE_ATTR_RW(watchdog_action); 129 130 static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value) 131 { 132 struct gb_svc_pwrmon_rail_count_get_response response; 133 int ret; 134 135 ret = gb_operation_sync(svc->connection, 136 GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0, 137 &response, sizeof(response)); 138 if (ret) { 139 dev_err(&svc->dev, "failed to get rail count: %d\n", ret); 140 return ret; 141 } 142 143 *value = response.rail_count; 144 145 return 0; 146 } 147 148 static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc, 149 struct gb_svc_pwrmon_rail_names_get_response *response, 150 size_t bufsize) 151 { 152 int ret; 153 154 ret = gb_operation_sync(svc->connection, 155 GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0, 156 response, bufsize); 157 if (ret) { 158 dev_err(&svc->dev, "failed to get rail names: %d\n", ret); 159 return ret; 160 } 161 162 if (response->status != GB_SVC_OP_SUCCESS) { 163 dev_err(&svc->dev, 164 "SVC error while getting rail names: %u\n", 165 response->status); 166 return -EREMOTEIO; 167 } 168 169 return 0; 170 } 171 172 static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id, 173 u8 measurement_type, u32 *value) 174 { 175 struct gb_svc_pwrmon_sample_get_request request; 176 struct gb_svc_pwrmon_sample_get_response response; 177 int ret; 178 179 request.rail_id = rail_id; 180 request.measurement_type = measurement_type; 181 182 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET, 183 &request, sizeof(request), 184 &response, sizeof(response)); 185 if (ret) { 186 dev_err(&svc->dev, "failed to get rail sample: %d\n", ret); 187 return ret; 188 } 189 190 if (response.result) { 191 dev_err(&svc->dev, 192 "UniPro error while getting rail power sample (%d %d): %d\n", 193 rail_id, measurement_type, response.result); 194 switch (response.result) { 195 case GB_SVC_PWRMON_GET_SAMPLE_INVAL: 196 return -EINVAL; 197 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP: 198 return -ENOMSG; 199 default: 200 return -EREMOTEIO; 201 } 202 } 203 204 *value = le32_to_cpu(response.measurement); 205 206 return 0; 207 } 208 209 int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id, 210 u8 measurement_type, u32 *value) 211 { 212 struct gb_svc_pwrmon_intf_sample_get_request request; 213 struct gb_svc_pwrmon_intf_sample_get_response response; 214 int ret; 215 216 request.intf_id = intf_id; 217 request.measurement_type = measurement_type; 218 219 ret = gb_operation_sync(svc->connection, 220 GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET, 221 &request, sizeof(request), 222 &response, sizeof(response)); 223 if (ret) { 224 dev_err(&svc->dev, "failed to get intf sample: %d\n", ret); 225 return ret; 226 } 227 228 if (response.result) { 229 dev_err(&svc->dev, 230 "UniPro error while getting intf power sample (%d %d): %d\n", 231 intf_id, measurement_type, response.result); 232 switch (response.result) { 233 case GB_SVC_PWRMON_GET_SAMPLE_INVAL: 234 return -EINVAL; 235 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP: 236 return -ENOMSG; 237 default: 238 return -EREMOTEIO; 239 } 240 } 241 242 *value = le32_to_cpu(response.measurement); 243 244 return 0; 245 } 246 247 static struct attribute *svc_attrs[] = { 248 &dev_attr_endo_id.attr, 249 &dev_attr_ap_intf_id.attr, 250 &dev_attr_intf_eject.attr, 251 &dev_attr_watchdog.attr, 252 &dev_attr_watchdog_action.attr, 253 NULL, 254 }; 255 ATTRIBUTE_GROUPS(svc); 256 257 int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id) 258 { 259 struct gb_svc_intf_device_id_request request; 260 261 request.intf_id = intf_id; 262 request.device_id = device_id; 263 264 return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID, 265 &request, sizeof(request), NULL, 0); 266 } 267 268 int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id) 269 { 270 struct gb_svc_intf_eject_request request; 271 int ret; 272 273 request.intf_id = intf_id; 274 275 /* 276 * The pulse width for module release in svc is long so we need to 277 * increase the timeout so the operation will not return to soon. 278 */ 279 ret = gb_operation_sync_timeout(svc->connection, 280 GB_SVC_TYPE_INTF_EJECT, &request, 281 sizeof(request), NULL, 0, 282 SVC_INTF_EJECT_TIMEOUT); 283 if (ret) { 284 dev_err(&svc->dev, "failed to eject interface %u\n", intf_id); 285 return ret; 286 } 287 288 return 0; 289 } 290 291 int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable) 292 { 293 struct gb_svc_intf_vsys_request request; 294 struct gb_svc_intf_vsys_response response; 295 int type, ret; 296 297 request.intf_id = intf_id; 298 299 if (enable) 300 type = GB_SVC_TYPE_INTF_VSYS_ENABLE; 301 else 302 type = GB_SVC_TYPE_INTF_VSYS_DISABLE; 303 304 ret = gb_operation_sync(svc->connection, type, 305 &request, sizeof(request), 306 &response, sizeof(response)); 307 if (ret < 0) 308 return ret; 309 if (response.result_code != GB_SVC_INTF_VSYS_OK) 310 return -EREMOTEIO; 311 return 0; 312 } 313 314 int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable) 315 { 316 struct gb_svc_intf_refclk_request request; 317 struct gb_svc_intf_refclk_response response; 318 int type, ret; 319 320 request.intf_id = intf_id; 321 322 if (enable) 323 type = GB_SVC_TYPE_INTF_REFCLK_ENABLE; 324 else 325 type = GB_SVC_TYPE_INTF_REFCLK_DISABLE; 326 327 ret = gb_operation_sync(svc->connection, type, 328 &request, sizeof(request), 329 &response, sizeof(response)); 330 if (ret < 0) 331 return ret; 332 if (response.result_code != GB_SVC_INTF_REFCLK_OK) 333 return -EREMOTEIO; 334 return 0; 335 } 336 337 int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable) 338 { 339 struct gb_svc_intf_unipro_request request; 340 struct gb_svc_intf_unipro_response response; 341 int type, ret; 342 343 request.intf_id = intf_id; 344 345 if (enable) 346 type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE; 347 else 348 type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE; 349 350 ret = gb_operation_sync(svc->connection, type, 351 &request, sizeof(request), 352 &response, sizeof(response)); 353 if (ret < 0) 354 return ret; 355 if (response.result_code != GB_SVC_INTF_UNIPRO_OK) 356 return -EREMOTEIO; 357 return 0; 358 } 359 360 int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type) 361 { 362 struct gb_svc_intf_activate_request request; 363 struct gb_svc_intf_activate_response response; 364 int ret; 365 366 request.intf_id = intf_id; 367 368 ret = gb_operation_sync_timeout(svc->connection, 369 GB_SVC_TYPE_INTF_ACTIVATE, 370 &request, sizeof(request), 371 &response, sizeof(response), 372 SVC_INTF_ACTIVATE_TIMEOUT); 373 if (ret < 0) 374 return ret; 375 if (response.status != GB_SVC_OP_SUCCESS) { 376 dev_err(&svc->dev, "failed to activate interface %u: %u\n", 377 intf_id, response.status); 378 return -EREMOTEIO; 379 } 380 381 *intf_type = response.intf_type; 382 383 return 0; 384 } 385 386 int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id) 387 { 388 struct gb_svc_intf_resume_request request; 389 struct gb_svc_intf_resume_response response; 390 int ret; 391 392 request.intf_id = intf_id; 393 394 ret = gb_operation_sync_timeout(svc->connection, 395 GB_SVC_TYPE_INTF_RESUME, 396 &request, sizeof(request), 397 &response, sizeof(response), 398 SVC_INTF_RESUME_TIMEOUT); 399 if (ret < 0) { 400 dev_err(&svc->dev, "failed to send interface resume %u: %d\n", 401 intf_id, ret); 402 return ret; 403 } 404 405 if (response.status != GB_SVC_OP_SUCCESS) { 406 dev_err(&svc->dev, "failed to resume interface %u: %u\n", 407 intf_id, response.status); 408 return -EREMOTEIO; 409 } 410 411 return 0; 412 } 413 414 int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, 415 u32 *value) 416 { 417 struct gb_svc_dme_peer_get_request request; 418 struct gb_svc_dme_peer_get_response response; 419 u16 result; 420 int ret; 421 422 request.intf_id = intf_id; 423 request.attr = cpu_to_le16(attr); 424 request.selector = cpu_to_le16(selector); 425 426 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET, 427 &request, sizeof(request), 428 &response, sizeof(response)); 429 if (ret) { 430 dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n", 431 intf_id, attr, selector, ret); 432 return ret; 433 } 434 435 result = le16_to_cpu(response.result_code); 436 if (result) { 437 dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n", 438 intf_id, attr, selector, result); 439 return -EREMOTEIO; 440 } 441 442 if (value) 443 *value = le32_to_cpu(response.attr_value); 444 445 return 0; 446 } 447 448 int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, 449 u32 value) 450 { 451 struct gb_svc_dme_peer_set_request request; 452 struct gb_svc_dme_peer_set_response response; 453 u16 result; 454 int ret; 455 456 request.intf_id = intf_id; 457 request.attr = cpu_to_le16(attr); 458 request.selector = cpu_to_le16(selector); 459 request.value = cpu_to_le32(value); 460 461 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET, 462 &request, sizeof(request), 463 &response, sizeof(response)); 464 if (ret) { 465 dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n", 466 intf_id, attr, selector, value, ret); 467 return ret; 468 } 469 470 result = le16_to_cpu(response.result_code); 471 if (result) { 472 dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n", 473 intf_id, attr, selector, value, result); 474 return -EREMOTEIO; 475 } 476 477 return 0; 478 } 479 480 int gb_svc_connection_create(struct gb_svc *svc, 481 u8 intf1_id, u16 cport1_id, 482 u8 intf2_id, u16 cport2_id, 483 u8 cport_flags) 484 { 485 struct gb_svc_conn_create_request request; 486 487 request.intf1_id = intf1_id; 488 request.cport1_id = cpu_to_le16(cport1_id); 489 request.intf2_id = intf2_id; 490 request.cport2_id = cpu_to_le16(cport2_id); 491 request.tc = 0; /* TC0 */ 492 request.flags = cport_flags; 493 494 return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE, 495 &request, sizeof(request), NULL, 0); 496 } 497 498 void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, 499 u8 intf2_id, u16 cport2_id) 500 { 501 struct gb_svc_conn_destroy_request request; 502 struct gb_connection *connection = svc->connection; 503 int ret; 504 505 request.intf1_id = intf1_id; 506 request.cport1_id = cpu_to_le16(cport1_id); 507 request.intf2_id = intf2_id; 508 request.cport2_id = cpu_to_le16(cport2_id); 509 510 ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY, 511 &request, sizeof(request), NULL, 0); 512 if (ret) { 513 dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n", 514 intf1_id, cport1_id, intf2_id, cport2_id, ret); 515 } 516 } 517 518 /* Creates bi-directional routes between the devices */ 519 int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id, 520 u8 intf2_id, u8 dev2_id) 521 { 522 struct gb_svc_route_create_request request; 523 524 request.intf1_id = intf1_id; 525 request.dev1_id = dev1_id; 526 request.intf2_id = intf2_id; 527 request.dev2_id = dev2_id; 528 529 return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE, 530 &request, sizeof(request), NULL, 0); 531 } 532 533 /* Destroys bi-directional routes between the devices */ 534 void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id) 535 { 536 struct gb_svc_route_destroy_request request; 537 int ret; 538 539 request.intf1_id = intf1_id; 540 request.intf2_id = intf2_id; 541 542 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY, 543 &request, sizeof(request), NULL, 0); 544 if (ret) { 545 dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n", 546 intf1_id, intf2_id, ret); 547 } 548 } 549 550 int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series, 551 u8 tx_mode, u8 tx_gear, u8 tx_nlanes, 552 u8 tx_amplitude, u8 tx_hs_equalizer, 553 u8 rx_mode, u8 rx_gear, u8 rx_nlanes, 554 u8 flags, u32 quirks, 555 struct gb_svc_l2_timer_cfg *local, 556 struct gb_svc_l2_timer_cfg *remote) 557 { 558 struct gb_svc_intf_set_pwrm_request request; 559 struct gb_svc_intf_set_pwrm_response response; 560 int ret; 561 u16 result_code; 562 563 memset(&request, 0, sizeof(request)); 564 565 request.intf_id = intf_id; 566 request.hs_series = hs_series; 567 request.tx_mode = tx_mode; 568 request.tx_gear = tx_gear; 569 request.tx_nlanes = tx_nlanes; 570 request.tx_amplitude = tx_amplitude; 571 request.tx_hs_equalizer = tx_hs_equalizer; 572 request.rx_mode = rx_mode; 573 request.rx_gear = rx_gear; 574 request.rx_nlanes = rx_nlanes; 575 request.flags = flags; 576 request.quirks = cpu_to_le32(quirks); 577 if (local) 578 request.local_l2timerdata = *local; 579 if (remote) 580 request.remote_l2timerdata = *remote; 581 582 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM, 583 &request, sizeof(request), 584 &response, sizeof(response)); 585 if (ret < 0) 586 return ret; 587 588 result_code = response.result_code; 589 if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) { 590 dev_err(&svc->dev, "set power mode = %d\n", result_code); 591 return -EIO; 592 } 593 594 return 0; 595 } 596 EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode); 597 598 int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id) 599 { 600 struct gb_svc_intf_set_pwrm_request request; 601 struct gb_svc_intf_set_pwrm_response response; 602 int ret; 603 u16 result_code; 604 605 memset(&request, 0, sizeof(request)); 606 607 request.intf_id = intf_id; 608 request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A; 609 request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE; 610 request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE; 611 612 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM, 613 &request, sizeof(request), 614 &response, sizeof(response)); 615 if (ret < 0) { 616 dev_err(&svc->dev, 617 "failed to send set power mode operation to interface %u: %d\n", 618 intf_id, ret); 619 return ret; 620 } 621 622 result_code = response.result_code; 623 if (result_code != GB_SVC_SETPWRM_PWR_OK) { 624 dev_err(&svc->dev, 625 "failed to hibernate the link for interface %u: %u\n", 626 intf_id, result_code); 627 return -EIO; 628 } 629 630 return 0; 631 } 632 633 int gb_svc_ping(struct gb_svc *svc) 634 { 635 return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING, 636 NULL, 0, NULL, 0, 637 GB_OPERATION_TIMEOUT_DEFAULT * 2); 638 } 639 640 static int gb_svc_version_request(struct gb_operation *op) 641 { 642 struct gb_connection *connection = op->connection; 643 struct gb_svc *svc = gb_connection_get_data(connection); 644 struct gb_svc_version_request *request; 645 struct gb_svc_version_response *response; 646 647 if (op->request->payload_size < sizeof(*request)) { 648 dev_err(&svc->dev, "short version request (%zu < %zu)\n", 649 op->request->payload_size, 650 sizeof(*request)); 651 return -EINVAL; 652 } 653 654 request = op->request->payload; 655 656 if (request->major > GB_SVC_VERSION_MAJOR) { 657 dev_warn(&svc->dev, "unsupported major version (%u > %u)\n", 658 request->major, GB_SVC_VERSION_MAJOR); 659 return -ENOTSUPP; 660 } 661 662 svc->protocol_major = request->major; 663 svc->protocol_minor = request->minor; 664 665 if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL)) 666 return -ENOMEM; 667 668 response = op->response->payload; 669 response->major = svc->protocol_major; 670 response->minor = svc->protocol_minor; 671 672 return 0; 673 } 674 675 static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf, 676 size_t len, loff_t *offset) 677 { 678 struct svc_debugfs_pwrmon_rail *pwrmon_rails = 679 file_inode(file)->i_private; 680 struct gb_svc *svc = pwrmon_rails->svc; 681 int ret, desc; 682 u32 value; 683 char buff[16]; 684 685 ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, 686 GB_SVC_PWRMON_TYPE_VOL, &value); 687 if (ret) { 688 dev_err(&svc->dev, 689 "failed to get voltage sample %u: %d\n", 690 pwrmon_rails->id, ret); 691 return ret; 692 } 693 694 desc = scnprintf(buff, sizeof(buff), "%u\n", value); 695 696 return simple_read_from_buffer(buf, len, offset, buff, desc); 697 } 698 699 static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf, 700 size_t len, loff_t *offset) 701 { 702 struct svc_debugfs_pwrmon_rail *pwrmon_rails = 703 file_inode(file)->i_private; 704 struct gb_svc *svc = pwrmon_rails->svc; 705 int ret, desc; 706 u32 value; 707 char buff[16]; 708 709 ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, 710 GB_SVC_PWRMON_TYPE_CURR, &value); 711 if (ret) { 712 dev_err(&svc->dev, 713 "failed to get current sample %u: %d\n", 714 pwrmon_rails->id, ret); 715 return ret; 716 } 717 718 desc = scnprintf(buff, sizeof(buff), "%u\n", value); 719 720 return simple_read_from_buffer(buf, len, offset, buff, desc); 721 } 722 723 static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf, 724 size_t len, loff_t *offset) 725 { 726 struct svc_debugfs_pwrmon_rail *pwrmon_rails = 727 file_inode(file)->i_private; 728 struct gb_svc *svc = pwrmon_rails->svc; 729 int ret, desc; 730 u32 value; 731 char buff[16]; 732 733 ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, 734 GB_SVC_PWRMON_TYPE_PWR, &value); 735 if (ret) { 736 dev_err(&svc->dev, "failed to get power sample %u: %d\n", 737 pwrmon_rails->id, ret); 738 return ret; 739 } 740 741 desc = scnprintf(buff, sizeof(buff), "%u\n", value); 742 743 return simple_read_from_buffer(buf, len, offset, buff, desc); 744 } 745 746 static const struct file_operations pwrmon_debugfs_voltage_fops = { 747 .read = pwr_debugfs_voltage_read, 748 }; 749 750 static const struct file_operations pwrmon_debugfs_current_fops = { 751 .read = pwr_debugfs_current_read, 752 }; 753 754 static const struct file_operations pwrmon_debugfs_power_fops = { 755 .read = pwr_debugfs_power_read, 756 }; 757 758 static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc) 759 { 760 int i; 761 size_t bufsize; 762 struct dentry *dent; 763 struct gb_svc_pwrmon_rail_names_get_response *rail_names; 764 u8 rail_count; 765 766 dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry); 767 if (IS_ERR_OR_NULL(dent)) 768 return; 769 770 if (gb_svc_pwrmon_rail_count_get(svc, &rail_count)) 771 goto err_pwrmon_debugfs; 772 773 if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT) 774 goto err_pwrmon_debugfs; 775 776 bufsize = sizeof(*rail_names) + 777 GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count; 778 779 rail_names = kzalloc(bufsize, GFP_KERNEL); 780 if (!rail_names) 781 goto err_pwrmon_debugfs; 782 783 svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails), 784 GFP_KERNEL); 785 if (!svc->pwrmon_rails) 786 goto err_pwrmon_debugfs_free; 787 788 if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize)) 789 goto err_pwrmon_debugfs_free; 790 791 for (i = 0; i < rail_count; i++) { 792 struct dentry *dir; 793 struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i]; 794 char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE]; 795 796 snprintf(fname, sizeof(fname), "%s", 797 (char *)&rail_names->name[i]); 798 799 rail->id = i; 800 rail->svc = svc; 801 802 dir = debugfs_create_dir(fname, dent); 803 debugfs_create_file("voltage_now", 0444, dir, rail, 804 &pwrmon_debugfs_voltage_fops); 805 debugfs_create_file("current_now", 0444, dir, rail, 806 &pwrmon_debugfs_current_fops); 807 debugfs_create_file("power_now", 0444, dir, rail, 808 &pwrmon_debugfs_power_fops); 809 } 810 811 kfree(rail_names); 812 return; 813 814 err_pwrmon_debugfs_free: 815 kfree(rail_names); 816 kfree(svc->pwrmon_rails); 817 svc->pwrmon_rails = NULL; 818 819 err_pwrmon_debugfs: 820 debugfs_remove(dent); 821 } 822 823 static void gb_svc_debugfs_init(struct gb_svc *svc) 824 { 825 svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev), 826 gb_debugfs_get()); 827 gb_svc_pwrmon_debugfs_init(svc); 828 } 829 830 static void gb_svc_debugfs_exit(struct gb_svc *svc) 831 { 832 debugfs_remove_recursive(svc->debugfs_dentry); 833 kfree(svc->pwrmon_rails); 834 svc->pwrmon_rails = NULL; 835 } 836 837 static int gb_svc_hello(struct gb_operation *op) 838 { 839 struct gb_connection *connection = op->connection; 840 struct gb_svc *svc = gb_connection_get_data(connection); 841 struct gb_svc_hello_request *hello_request; 842 int ret; 843 844 if (op->request->payload_size < sizeof(*hello_request)) { 845 dev_warn(&svc->dev, "short hello request (%zu < %zu)\n", 846 op->request->payload_size, 847 sizeof(*hello_request)); 848 return -EINVAL; 849 } 850 851 hello_request = op->request->payload; 852 svc->endo_id = le16_to_cpu(hello_request->endo_id); 853 svc->ap_intf_id = hello_request->interface_id; 854 855 ret = device_add(&svc->dev); 856 if (ret) { 857 dev_err(&svc->dev, "failed to register svc device: %d\n", ret); 858 return ret; 859 } 860 861 ret = gb_svc_watchdog_create(svc); 862 if (ret) { 863 dev_err(&svc->dev, "failed to create watchdog: %d\n", ret); 864 goto err_deregister_svc; 865 } 866 867 /* 868 * FIXME: This is a temporary hack to reconfigure the link at HELLO 869 * (which abuses the deferred request processing mechanism). 870 */ 871 ret = gb_svc_queue_deferred_request(op); 872 if (ret) 873 goto err_destroy_watchdog; 874 875 gb_svc_debugfs_init(svc); 876 877 return 0; 878 879 err_destroy_watchdog: 880 gb_svc_watchdog_destroy(svc); 881 err_deregister_svc: 882 device_del(&svc->dev); 883 884 return ret; 885 } 886 887 static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc, 888 u8 intf_id) 889 { 890 struct gb_host_device *hd = svc->hd; 891 struct gb_module *module; 892 size_t num_interfaces; 893 u8 module_id; 894 895 list_for_each_entry(module, &hd->modules, hd_node) { 896 module_id = module->module_id; 897 num_interfaces = module->num_interfaces; 898 899 if (intf_id >= module_id && 900 intf_id < module_id + num_interfaces) { 901 return module->interfaces[intf_id - module_id]; 902 } 903 } 904 905 return NULL; 906 } 907 908 static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id) 909 { 910 struct gb_host_device *hd = svc->hd; 911 struct gb_module *module; 912 913 list_for_each_entry(module, &hd->modules, hd_node) { 914 if (module->module_id == module_id) 915 return module; 916 } 917 918 return NULL; 919 } 920 921 static void gb_svc_process_hello_deferred(struct gb_operation *operation) 922 { 923 struct gb_connection *connection = operation->connection; 924 struct gb_svc *svc = gb_connection_get_data(connection); 925 int ret; 926 927 /* 928 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch 929 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient 930 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged 931 * module. 932 * 933 * The code should be removed once SW-2217, Heuristic for UniPro 934 * Power Mode Changes is resolved. 935 */ 936 ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id, 937 GB_SVC_UNIPRO_HS_SERIES_A, 938 GB_SVC_UNIPRO_SLOW_AUTO_MODE, 939 2, 1, 940 GB_SVC_SMALL_AMPLITUDE, 941 GB_SVC_NO_DE_EMPHASIS, 942 GB_SVC_UNIPRO_SLOW_AUTO_MODE, 943 2, 1, 944 0, 0, 945 NULL, NULL); 946 947 if (ret) 948 dev_warn(&svc->dev, 949 "power mode change failed on AP to switch link: %d\n", 950 ret); 951 } 952 953 static void gb_svc_process_module_inserted(struct gb_operation *operation) 954 { 955 struct gb_svc_module_inserted_request *request; 956 struct gb_connection *connection = operation->connection; 957 struct gb_svc *svc = gb_connection_get_data(connection); 958 struct gb_host_device *hd = svc->hd; 959 struct gb_module *module; 960 size_t num_interfaces; 961 u8 module_id; 962 u16 flags; 963 int ret; 964 965 /* The request message size has already been verified. */ 966 request = operation->request->payload; 967 module_id = request->primary_intf_id; 968 num_interfaces = request->intf_count; 969 flags = le16_to_cpu(request->flags); 970 971 dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n", 972 __func__, module_id, num_interfaces, flags); 973 974 if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) { 975 dev_warn(&svc->dev, "no primary interface detected on module %u\n", 976 module_id); 977 } 978 979 module = gb_svc_module_lookup(svc, module_id); 980 if (module) { 981 dev_warn(&svc->dev, "unexpected module-inserted event %u\n", 982 module_id); 983 return; 984 } 985 986 module = gb_module_create(hd, module_id, num_interfaces); 987 if (!module) { 988 dev_err(&svc->dev, "failed to create module\n"); 989 return; 990 } 991 992 ret = gb_module_add(module); 993 if (ret) { 994 gb_module_put(module); 995 return; 996 } 997 998 list_add(&module->hd_node, &hd->modules); 999 } 1000 1001 static void gb_svc_process_module_removed(struct gb_operation *operation) 1002 { 1003 struct gb_svc_module_removed_request *request; 1004 struct gb_connection *connection = operation->connection; 1005 struct gb_svc *svc = gb_connection_get_data(connection); 1006 struct gb_module *module; 1007 u8 module_id; 1008 1009 /* The request message size has already been verified. */ 1010 request = operation->request->payload; 1011 module_id = request->primary_intf_id; 1012 1013 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id); 1014 1015 module = gb_svc_module_lookup(svc, module_id); 1016 if (!module) { 1017 dev_warn(&svc->dev, "unexpected module-removed event %u\n", 1018 module_id); 1019 return; 1020 } 1021 1022 module->disconnected = true; 1023 1024 gb_module_del(module); 1025 list_del(&module->hd_node); 1026 gb_module_put(module); 1027 } 1028 1029 static void gb_svc_process_intf_oops(struct gb_operation *operation) 1030 { 1031 struct gb_svc_intf_oops_request *request; 1032 struct gb_connection *connection = operation->connection; 1033 struct gb_svc *svc = gb_connection_get_data(connection); 1034 struct gb_interface *intf; 1035 u8 intf_id; 1036 u8 reason; 1037 1038 /* The request message size has already been verified. */ 1039 request = operation->request->payload; 1040 intf_id = request->intf_id; 1041 reason = request->reason; 1042 1043 intf = gb_svc_interface_lookup(svc, intf_id); 1044 if (!intf) { 1045 dev_warn(&svc->dev, "unexpected interface-oops event %u\n", 1046 intf_id); 1047 return; 1048 } 1049 1050 dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n", 1051 intf_id, reason); 1052 1053 mutex_lock(&intf->mutex); 1054 intf->disconnected = true; 1055 gb_interface_disable(intf); 1056 gb_interface_deactivate(intf); 1057 mutex_unlock(&intf->mutex); 1058 } 1059 1060 static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation) 1061 { 1062 struct gb_svc_intf_mailbox_event_request *request; 1063 struct gb_connection *connection = operation->connection; 1064 struct gb_svc *svc = gb_connection_get_data(connection); 1065 struct gb_interface *intf; 1066 u8 intf_id; 1067 u16 result_code; 1068 u32 mailbox; 1069 1070 /* The request message size has already been verified. */ 1071 request = operation->request->payload; 1072 intf_id = request->intf_id; 1073 result_code = le16_to_cpu(request->result_code); 1074 mailbox = le32_to_cpu(request->mailbox); 1075 1076 dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n", 1077 __func__, intf_id, result_code, mailbox); 1078 1079 intf = gb_svc_interface_lookup(svc, intf_id); 1080 if (!intf) { 1081 dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id); 1082 return; 1083 } 1084 1085 gb_interface_mailbox_event(intf, result_code, mailbox); 1086 } 1087 1088 static void gb_svc_process_deferred_request(struct work_struct *work) 1089 { 1090 struct gb_svc_deferred_request *dr; 1091 struct gb_operation *operation; 1092 struct gb_svc *svc; 1093 u8 type; 1094 1095 dr = container_of(work, struct gb_svc_deferred_request, work); 1096 operation = dr->operation; 1097 svc = gb_connection_get_data(operation->connection); 1098 type = operation->request->header->type; 1099 1100 switch (type) { 1101 case GB_SVC_TYPE_SVC_HELLO: 1102 gb_svc_process_hello_deferred(operation); 1103 break; 1104 case GB_SVC_TYPE_MODULE_INSERTED: 1105 gb_svc_process_module_inserted(operation); 1106 break; 1107 case GB_SVC_TYPE_MODULE_REMOVED: 1108 gb_svc_process_module_removed(operation); 1109 break; 1110 case GB_SVC_TYPE_INTF_MAILBOX_EVENT: 1111 gb_svc_process_intf_mailbox_event(operation); 1112 break; 1113 case GB_SVC_TYPE_INTF_OOPS: 1114 gb_svc_process_intf_oops(operation); 1115 break; 1116 default: 1117 dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type); 1118 } 1119 1120 gb_operation_put(operation); 1121 kfree(dr); 1122 } 1123 1124 static int gb_svc_queue_deferred_request(struct gb_operation *operation) 1125 { 1126 struct gb_svc *svc = gb_connection_get_data(operation->connection); 1127 struct gb_svc_deferred_request *dr; 1128 1129 dr = kmalloc(sizeof(*dr), GFP_KERNEL); 1130 if (!dr) 1131 return -ENOMEM; 1132 1133 gb_operation_get(operation); 1134 1135 dr->operation = operation; 1136 INIT_WORK(&dr->work, gb_svc_process_deferred_request); 1137 1138 queue_work(svc->wq, &dr->work); 1139 1140 return 0; 1141 } 1142 1143 static int gb_svc_intf_reset_recv(struct gb_operation *op) 1144 { 1145 struct gb_svc *svc = gb_connection_get_data(op->connection); 1146 struct gb_message *request = op->request; 1147 struct gb_svc_intf_reset_request *reset; 1148 1149 if (request->payload_size < sizeof(*reset)) { 1150 dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n", 1151 request->payload_size, sizeof(*reset)); 1152 return -EINVAL; 1153 } 1154 reset = request->payload; 1155 1156 /* FIXME Reset the interface here */ 1157 1158 return 0; 1159 } 1160 1161 static int gb_svc_module_inserted_recv(struct gb_operation *op) 1162 { 1163 struct gb_svc *svc = gb_connection_get_data(op->connection); 1164 struct gb_svc_module_inserted_request *request; 1165 1166 if (op->request->payload_size < sizeof(*request)) { 1167 dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n", 1168 op->request->payload_size, sizeof(*request)); 1169 return -EINVAL; 1170 } 1171 1172 request = op->request->payload; 1173 1174 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, 1175 request->primary_intf_id); 1176 1177 return gb_svc_queue_deferred_request(op); 1178 } 1179 1180 static int gb_svc_module_removed_recv(struct gb_operation *op) 1181 { 1182 struct gb_svc *svc = gb_connection_get_data(op->connection); 1183 struct gb_svc_module_removed_request *request; 1184 1185 if (op->request->payload_size < sizeof(*request)) { 1186 dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n", 1187 op->request->payload_size, sizeof(*request)); 1188 return -EINVAL; 1189 } 1190 1191 request = op->request->payload; 1192 1193 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, 1194 request->primary_intf_id); 1195 1196 return gb_svc_queue_deferred_request(op); 1197 } 1198 1199 static int gb_svc_intf_oops_recv(struct gb_operation *op) 1200 { 1201 struct gb_svc *svc = gb_connection_get_data(op->connection); 1202 struct gb_svc_intf_oops_request *request; 1203 1204 if (op->request->payload_size < sizeof(*request)) { 1205 dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n", 1206 op->request->payload_size, sizeof(*request)); 1207 return -EINVAL; 1208 } 1209 1210 return gb_svc_queue_deferred_request(op); 1211 } 1212 1213 static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op) 1214 { 1215 struct gb_svc *svc = gb_connection_get_data(op->connection); 1216 struct gb_svc_intf_mailbox_event_request *request; 1217 1218 if (op->request->payload_size < sizeof(*request)) { 1219 dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n", 1220 op->request->payload_size, sizeof(*request)); 1221 return -EINVAL; 1222 } 1223 1224 request = op->request->payload; 1225 1226 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id); 1227 1228 return gb_svc_queue_deferred_request(op); 1229 } 1230 1231 static int gb_svc_request_handler(struct gb_operation *op) 1232 { 1233 struct gb_connection *connection = op->connection; 1234 struct gb_svc *svc = gb_connection_get_data(connection); 1235 u8 type = op->type; 1236 int ret = 0; 1237 1238 /* 1239 * SVC requests need to follow a specific order (at least initially) and 1240 * below code takes care of enforcing that. The expected order is: 1241 * - PROTOCOL_VERSION 1242 * - SVC_HELLO 1243 * - Any other request, but the earlier two. 1244 * 1245 * Incoming requests are guaranteed to be serialized and so we don't 1246 * need to protect 'state' for any races. 1247 */ 1248 switch (type) { 1249 case GB_SVC_TYPE_PROTOCOL_VERSION: 1250 if (svc->state != GB_SVC_STATE_RESET) 1251 ret = -EINVAL; 1252 break; 1253 case GB_SVC_TYPE_SVC_HELLO: 1254 if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION) 1255 ret = -EINVAL; 1256 break; 1257 default: 1258 if (svc->state != GB_SVC_STATE_SVC_HELLO) 1259 ret = -EINVAL; 1260 break; 1261 } 1262 1263 if (ret) { 1264 dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n", 1265 type, svc->state); 1266 return ret; 1267 } 1268 1269 switch (type) { 1270 case GB_SVC_TYPE_PROTOCOL_VERSION: 1271 ret = gb_svc_version_request(op); 1272 if (!ret) 1273 svc->state = GB_SVC_STATE_PROTOCOL_VERSION; 1274 return ret; 1275 case GB_SVC_TYPE_SVC_HELLO: 1276 ret = gb_svc_hello(op); 1277 if (!ret) 1278 svc->state = GB_SVC_STATE_SVC_HELLO; 1279 return ret; 1280 case GB_SVC_TYPE_INTF_RESET: 1281 return gb_svc_intf_reset_recv(op); 1282 case GB_SVC_TYPE_MODULE_INSERTED: 1283 return gb_svc_module_inserted_recv(op); 1284 case GB_SVC_TYPE_MODULE_REMOVED: 1285 return gb_svc_module_removed_recv(op); 1286 case GB_SVC_TYPE_INTF_MAILBOX_EVENT: 1287 return gb_svc_intf_mailbox_event_recv(op); 1288 case GB_SVC_TYPE_INTF_OOPS: 1289 return gb_svc_intf_oops_recv(op); 1290 default: 1291 dev_warn(&svc->dev, "unsupported request 0x%02x\n", type); 1292 return -EINVAL; 1293 } 1294 } 1295 1296 static void gb_svc_release(struct device *dev) 1297 { 1298 struct gb_svc *svc = to_gb_svc(dev); 1299 1300 if (svc->connection) 1301 gb_connection_destroy(svc->connection); 1302 ida_destroy(&svc->device_id_map); 1303 destroy_workqueue(svc->wq); 1304 kfree(svc); 1305 } 1306 1307 struct device_type greybus_svc_type = { 1308 .name = "greybus_svc", 1309 .release = gb_svc_release, 1310 }; 1311 1312 struct gb_svc *gb_svc_create(struct gb_host_device *hd) 1313 { 1314 struct gb_svc *svc; 1315 1316 svc = kzalloc(sizeof(*svc), GFP_KERNEL); 1317 if (!svc) 1318 return NULL; 1319 1320 svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev)); 1321 if (!svc->wq) { 1322 kfree(svc); 1323 return NULL; 1324 } 1325 1326 svc->dev.parent = &hd->dev; 1327 svc->dev.bus = &greybus_bus_type; 1328 svc->dev.type = &greybus_svc_type; 1329 svc->dev.groups = svc_groups; 1330 svc->dev.dma_mask = svc->dev.parent->dma_mask; 1331 device_initialize(&svc->dev); 1332 1333 dev_set_name(&svc->dev, "%d-svc", hd->bus_id); 1334 1335 ida_init(&svc->device_id_map); 1336 svc->state = GB_SVC_STATE_RESET; 1337 svc->hd = hd; 1338 1339 svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID, 1340 gb_svc_request_handler); 1341 if (IS_ERR(svc->connection)) { 1342 dev_err(&svc->dev, "failed to create connection: %ld\n", 1343 PTR_ERR(svc->connection)); 1344 goto err_put_device; 1345 } 1346 1347 gb_connection_set_data(svc->connection, svc); 1348 1349 return svc; 1350 1351 err_put_device: 1352 put_device(&svc->dev); 1353 return NULL; 1354 } 1355 1356 int gb_svc_add(struct gb_svc *svc) 1357 { 1358 int ret; 1359 1360 /* 1361 * The SVC protocol is currently driven by the SVC, so the SVC device 1362 * is added from the connection request handler when enough 1363 * information has been received. 1364 */ 1365 ret = gb_connection_enable(svc->connection); 1366 if (ret) 1367 return ret; 1368 1369 return 0; 1370 } 1371 1372 static void gb_svc_remove_modules(struct gb_svc *svc) 1373 { 1374 struct gb_host_device *hd = svc->hd; 1375 struct gb_module *module, *tmp; 1376 1377 list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) { 1378 gb_module_del(module); 1379 list_del(&module->hd_node); 1380 gb_module_put(module); 1381 } 1382 } 1383 1384 void gb_svc_del(struct gb_svc *svc) 1385 { 1386 gb_connection_disable_rx(svc->connection); 1387 1388 /* 1389 * The SVC device may have been registered from the request handler. 1390 */ 1391 if (device_is_registered(&svc->dev)) { 1392 gb_svc_debugfs_exit(svc); 1393 gb_svc_watchdog_destroy(svc); 1394 device_del(&svc->dev); 1395 } 1396 1397 flush_workqueue(svc->wq); 1398 1399 gb_svc_remove_modules(svc); 1400 1401 gb_connection_disable(svc->connection); 1402 } 1403 1404 void gb_svc_put(struct gb_svc *svc) 1405 { 1406 put_device(&svc->dev); 1407 } 1408