// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE] = "none",
	[IDXD_WQT_KERNEL] = "kernel",
};

static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}
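/*
 * Binding a device or wq to the "dsa" driver through this bus is what
 * actually configures and enables it in hardware.  An illustrative
 * (not prescriptive) sysfs sequence, assuming a first device named
 * dsa0 with work queue wq0.0 already configured:
 *
 *	echo dsa0  > /sys/bus/dsa/drivers/dsa/bind
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
 *
 * The bus and the driver are both named "dsa" (see dsa_bus_type and
 * dsa_drv below), which is what produces the paths above.
 */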
static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);

		/* Perform IDXD configuration and enabling */
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		/* dev_lock has already been dropped; no unlock on failure */
		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}
		spin_unlock_irqrestore(&idxd->dev_lock, flags);

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			/* don't clobber the mapping error with the disable result */
			if (idxd_wq_disable(wq) < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}
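/*
 * disable_wq() unwinds what the wq half of idxd_config_bus_probe()
 * set up, in reverse order: unregister the DMA channel, unmap the
 * portal, disable the wq in hardware under dev_lock, then free its
 * resources.  It is safe to call on an already-disabled wq.
 */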
static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	unsigned long flags;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = idxd_wq_disable(wq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

static int idxd_config_bus_remove(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n",
			__func__, dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_disable(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));
	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

static struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv
};

static struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else
		return NULL;
}
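/*
 * idxd_bus_types[] and idxd_drvs[] above are parallel arrays with one
 * entry per idxd device type (only DSA so far); idxd_get_bus_type()
 * indexes them by idxd->type and the register/unregister helpers
 * below walk them with IDXD_TYPE_MAX as the bound.
 */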
/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	/* unwind only the drivers that actually registered */
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = group;
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};
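/*
 * Token accounting (see idxd_set_free_tokens() below): tokens are a
 * device-wide resource that groups share.  The free pool is
 *
 *	nr_tokens = max_tokens - sum of every group's tokens_reserved
 *
 * and a group's tokens_allowed must sit between 4 * num_engines and
 * tokens_reserved + nr_tokens.  Illustrative numbers only: with
 * max_tokens = 96 and two groups reserving 24 each, nr_tokens is 48.
 */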
/* Group attributes */

static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);
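/*
 * The read-only engines and work_queues lists below are derived by
 * scanning for engine->group and wq->group back-pointers that match
 * this group; the group object itself only keeps the counts that the
 * group_id stores maintain.
 */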
static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
				      idxd->id, engine->id);
	}

	/* no members; don't step back past the start of buf */
	if (!rc)
		return 0;

	rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
				      idxd->id, wq->id);
	}

	/* no members; don't step back past the start of buf */
	if (!rc)
		return 0;

	rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};
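/*
 * A wq must be fully described before it can be enabled (probe
 * rejects a wq with no group or no name).  A minimal configuration
 * sketch via sysfs, with illustrative values and paths:
 *
 *	echo 16        > /sys/bus/dsa/devices/wq0.0/size
 *	echo 10        > /sys/bus/dsa/devices/wq0.0/priority
 *	echo 0         > /sys/bus/dsa/devices/wq0.0/group_id
 *	echo dedicated > /sys/bus/dsa/devices/wq0.0/mode
 *	echo kernel    > /sys/bus/dsa/devices/wq0.0/type
 *	echo dmaengine > /sys/bus/dsa/devices/wq0.0/name
 *
 * The "dmaengine" name makes is_idxd_wq_dmaengine() true, so probe
 * will also register a DMA channel for the wq.
 */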
/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
		       wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);
static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else
		wq->type = IDXD_WQT_NONE;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};
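/*
 * The device-level attributes below are read-only capability and
 * state reports, except token_limit, which is writable while the
 * device is configurable and not yet enabled.
 */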
/* IDXD device attribs */
static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);
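/*
 * idxd->sw_err is assumed to hold the most recent software error
 * record captured elsewhere in the driver; errors_show() snapshots
 * it under dev_lock and prints it as four 64-bit words.
 */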
static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};
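/*
 * conf_dev registration: every engine, group and wq conf_dev is
 * parented to the device's conf_dev, so idxd_setup_sysfs() must
 * register the device first and idxd_cleanup_sysfs() unregisters the
 * children before the parent.  Resulting layout, with illustrative
 * names for a first DSA device:
 *
 *	dsa0
 *	+- wq0.0 .. wq0.<max_wqs - 1>
 *	+- engine0.0 .. engine0.<max_engines - 1>
 *	+- group0.0 .. group0.<max_groups - 1>
 */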
static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}

static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}

int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}

void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	/* unwind only the buses that actually registered */
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}