// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE] = "none",
	[IDXD_WQT_KERNEL] = "kernel",
	[IDXD_WQT_USER] = "user",
};

static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}

static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		spin_lock_irqsave(&idxd->dev_lock, flags);

		/* Perform IDXD configuration and enabling */
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		dev_info(dev, "Device %s enabled\n", dev_name(dev));
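		/*
		 * Expose the enabled device through dmaengine so in-kernel
		 * DMA clients can discover its channels.
		 */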
		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			/* dev_lock was already released above */
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}
		spin_unlock_irqrestore(&idxd->dev_lock, flags);

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			rc = idxd_wq_disable(wq);
			if (rc < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}

static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	unsigned long flags;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = idxd_wq_disable(wq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}
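/*
 * Illustrative only (hypothetical device name): a bound device is torn
 * down by writing to the driver's unbind node, e.g.
 *
 *	echo dsa0 > /sys/bus/dsa/drivers/dsa/unbind
 *
 * which reaches idxd_config_bus_remove() below.
 */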
static int idxd_config_bus_remove(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_disable(idxd);
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			idxd_wq_disable_cleanup(wq);
		}
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));
	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv
};

struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	/* unwind only the drivers that actually registered */
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}
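/*
 * Example, with hypothetical sysfs paths: attach engine 0 of device dsa0
 * to group 0, or detach it again by writing -1:
 *
 *	echo 0 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 *	echo -1 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 */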
static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* Group attributes */

static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);
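/*
 * Token accounting sketch (illustrative numbers): with max_tokens == 96
 * and one group reserving 24 via tokens_reserved, idxd_set_free_tokens()
 * leaves nr_tokens == 72 in the shared pool; tokens_allowed then caps the
 * total a single group may consume (reserved plus shared).
 */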
static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
				      idxd->id, engine->id);
	}

	if (!rc)
		return sprintf(tmp, "\n");

	/* replace the trailing space with a newline */
	rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
				      idxd->id, wq->id);
	}

	if (!rc)
		return sprintf(tmp, "\n");

	/* replace the trailing space with a newline */
	rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);
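/*
 * Example (hypothetical path): select traffic class 1 for TC-A of group
 * 0.0 while the device is disabled; values 0-7 are accepted:
 *
 *	echo 1 > /sys/bus/dsa/devices/dsa0/group0.0/traffic_class_a
 */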
static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};
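/*
 * Illustrative wq setup from userspace (hypothetical names and values; all
 * of these writes require the wq to be disabled):
 *
 *	echo 0 > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *	echo 16 > /sys/bus/dsa/devices/dsa0/wq0.0/size
 *	echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *	echo 10 > /sys/bus/dsa/devices/dsa0/wq0.0/priority
 *	echo user > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *	echo app1 > /sys/bus/dsa/devices/dsa0/wq0.0/name
 */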
/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
		       wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);
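/*
 * The "type" attribute selects how a wq is exposed: "kernel" wqs are for
 * in-kernel users (a kernel wq named "dmaengine" is registered as a DMA
 * channel on enable), "user" wqs get a character device, and "none" leaves
 * the wq unused.
 */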
static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

/* IDXD device attribs */
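/*
 * Example (hypothetical device name): the read-only capability attributes
 * below can be inspected with e.g.
 *
 *	cat /sys/bus/dsa/devices/dsa0/max_work_queues
 *	cat /sys/bus/dsa/devices/dsa0/op_cap
 */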
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);
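/* "clients" totals the per-wq open counts under the device lock. */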
static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);
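/*
 * Example (illustrative value): set the device token limit while the
 * device is disabled; the write is rejected above total_tokens or when the
 * hardware lacks token limit support:
 *
 *	echo 64 > /sys/bus/dsa/devices/dsa0/token_limit
 */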
static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}
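/*
 * Note on the error paths above: once device_register() has been called,
 * the embedded conf_dev must be released with put_device() (which ends up
 * in idxd_conf_device_release()) rather than freed directly.
 */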
static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}

int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}

void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	/* unwind only the bus types that actually registered */
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}