// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}
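/*
 * Driver-model glue for the "dsa" bus. The device, work queue, group,
 * and engine objects each embed a conf_dev that is registered on this
 * bus. Only idxd devices and work queues ever match the driver (and
 * thus reach probe/remove below); group and engine devices exist purely
 * to expose configuration attributes.
 */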
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}

static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		/* Perform IDXD configuration and enabling */
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			rc = idxd_wq_disable(wq);
			if (rc < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}
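/*
 * Tear a work queue down in the reverse order of probe: detach the
 * dmaengine channel or cdev consumer first, then unmap the portal,
 * drain outstanding descriptors, disable the queue in hardware, and
 * finally release its resources.
 */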
static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	rc = idxd_wq_disable(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

static int idxd_config_bus_remove(struct device *dev)
{
	int rc;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		rc = idxd_device_disable(idxd);
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			mutex_lock(&wq->wq_lock);
			idxd_wq_disable_cleanup(wq);
			mutex_unlock(&wq->wq_lock);
		}
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));
	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv
};
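/*
 * A typical user-space configuration sequence, sketched here for
 * reference only (paths assume the default "dsa" device naming used by
 * the idxd_setup_*_sysfs() helpers at the bottom of this file):
 *
 *	echo 0 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 *	echo 0 > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *	echo 16 > /sys/bus/dsa/devices/dsa0/wq0.0/size
 *	echo user > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *	echo app1 > /sys/bus/dsa/devices/dsa0/wq0.0/name
 *	echo dsa0 > /sys/bus/dsa/drivers/dsa/bind
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
 *
 * Binding the device through the standard driver-core bind interface
 * triggers idxd_config_bus_probe() above, which commits the
 * configuration to hardware and enables the device; binding a wq then
 * enables that individual queue.
 */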
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	/* Unwind only the drivers that actually registered. */
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};
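/*
 * Tokens are, roughly, the device's shared bandwidth credits. A group
 * may reserve some for its exclusive use (tokens_reserved) and may be
 * capped at a total in-flight count (tokens_allowed); whatever is not
 * reserved by any group stays in the device-wide free pool
 * (idxd->nr_tokens), which idxd_set_free_tokens() recomputes.
 */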
/* Group attributes */

static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
					idxd->id, engine->id);
	}

	/* Replace the trailing space, if any, with the newline. */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
					idxd->id, wq->id);
	}

	/* Replace the trailing space, if any, with the newline. */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);
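/*
 * Traffic class selection for the group. The valid range enforced by
 * the stores below is 0 through 7.
 */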
static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};
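/*
 * Work queue attributes. Every store below first requires the queue to
 * be disabled, and several additionally require the device to still be
 * configurable (IDXD_FLAG_CONFIGURABLE) before altering what will be
 * programmed into the hardware.
 */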
/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
			wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}
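/*
 * The sizes of all work queues draw from one device-wide pool of
 * max_wq_size entries; a new size is accepted only if the total claimed
 * across queues, with this queue's old size excluded, still fits.
 */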
static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
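/*
 * Parse a u64 from sysfs input for the transfer and batch size limits
 * below. Zero is rejected, and the value is rounded up to the next
 * power of two, which is how these limits are kept.
 */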
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};
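/*
 * Device-level attributes. Most are read-only views of the capability
 * registers captured at probe time (idxd->hw); token_limit is the one
 * writable knob at this level.
 */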
/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
			test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);
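/*
 * Note that IDXD_DEV_CONF_READY reads back as "disabled": the device
 * can be configured in that state but is not yet running.
 */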
static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};
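/*
 * conf_dev registration helpers. On device_register() failure the
 * embedded struct device has already been initialized, so the error
 * paths drop it with put_device() rather than freeing it directly, and
 * then unwind any siblings that registered earlier.
 */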
static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}

static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}

int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}
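/*
 * Note: the error paths in idxd_setup_sysfs() above leave registrations
 * that already succeeded in place (the inline "unregister conf dev"
 * notes mark this gap); idxd_cleanup_sysfs() below is what ultimately
 * unregisters every conf_dev.
 */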
void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	/* Unwind only the bus types that actually registered. */
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}