1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ 3 #include <linux/init.h> 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/pci.h> 7 #include <linux/device.h> 8 #include <linux/io-64-nonatomic-lo-hi.h> 9 #include <uapi/linux/idxd.h> 10 #include "registers.h" 11 #include "idxd.h" 12 13 static char *idxd_wq_type_names[] = { 14 [IDXD_WQT_NONE] = "none", 15 [IDXD_WQT_KERNEL] = "kernel", 16 [IDXD_WQT_USER] = "user", 17 }; 18 19 static void idxd_conf_device_release(struct device *dev) 20 { 21 dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev)); 22 } 23 24 static struct device_type idxd_group_device_type = { 25 .name = "group", 26 .release = idxd_conf_device_release, 27 }; 28 29 static struct device_type idxd_wq_device_type = { 30 .name = "wq", 31 .release = idxd_conf_device_release, 32 }; 33 34 static struct device_type idxd_engine_device_type = { 35 .name = "engine", 36 .release = idxd_conf_device_release, 37 }; 38 39 static struct device_type dsa_device_type = { 40 .name = "dsa", 41 .release = idxd_conf_device_release, 42 }; 43 44 static struct device_type iax_device_type = { 45 .name = "iax", 46 .release = idxd_conf_device_release, 47 }; 48 49 static inline bool is_dsa_dev(struct device *dev) 50 { 51 return dev ? dev->type == &dsa_device_type : false; 52 } 53 54 static inline bool is_iax_dev(struct device *dev) 55 { 56 return dev ? dev->type == &iax_device_type : false; 57 } 58 59 static inline bool is_idxd_dev(struct device *dev) 60 { 61 return is_dsa_dev(dev) || is_iax_dev(dev); 62 } 63 64 static inline bool is_idxd_wq_dev(struct device *dev) 65 { 66 return dev ? dev->type == &idxd_wq_device_type : false; 67 } 68 69 static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq) 70 { 71 if (wq->type == IDXD_WQT_KERNEL && 72 strcmp(wq->name, "dmaengine") == 0) 73 return true; 74 return false; 75 } 76 77 static inline bool is_idxd_wq_cdev(struct idxd_wq *wq) 78 { 79 return wq->type == IDXD_WQT_USER; 80 } 81 82 static int idxd_config_bus_match(struct device *dev, 83 struct device_driver *drv) 84 { 85 int matched = 0; 86 87 if (is_idxd_dev(dev)) { 88 struct idxd_device *idxd = confdev_to_idxd(dev); 89 90 if (idxd->state != IDXD_DEV_CONF_READY) 91 return 0; 92 matched = 1; 93 } else if (is_idxd_wq_dev(dev)) { 94 struct idxd_wq *wq = confdev_to_wq(dev); 95 struct idxd_device *idxd = wq->idxd; 96 97 if (idxd->state < IDXD_DEV_CONF_READY) 98 return 0; 99 100 if (wq->state != IDXD_WQ_DISABLED) { 101 dev_dbg(dev, "%s not disabled\n", dev_name(dev)); 102 return 0; 103 } 104 matched = 1; 105 } 106 107 if (matched) 108 dev_dbg(dev, "%s matched\n", dev_name(dev)); 109 110 return matched; 111 } 112 113 static int idxd_config_bus_probe(struct device *dev) 114 { 115 int rc; 116 unsigned long flags; 117 118 dev_dbg(dev, "%s called\n", __func__); 119 120 if (is_idxd_dev(dev)) { 121 struct idxd_device *idxd = confdev_to_idxd(dev); 122 123 if (idxd->state != IDXD_DEV_CONF_READY) { 124 dev_warn(dev, "Device not ready for config\n"); 125 return -EBUSY; 126 } 127 128 if (!try_module_get(THIS_MODULE)) 129 return -ENXIO; 130 131 /* Perform IDXD configuration and enabling */ 132 spin_lock_irqsave(&idxd->dev_lock, flags); 133 rc = idxd_device_config(idxd); 134 spin_unlock_irqrestore(&idxd->dev_lock, flags); 135 if (rc < 0) { 136 module_put(THIS_MODULE); 137 dev_warn(dev, "Device config failed: %d\n", rc); 138 return rc; 139 } 140 141 /* start device */ 142 rc = idxd_device_enable(idxd); 143 if (rc < 0) { 144 module_put(THIS_MODULE); 145 dev_warn(dev, "Device enable failed: %d\n", rc); 146 return rc; 147 } 148 149 dev_info(dev, "Device %s enabled\n", dev_name(dev)); 150 151 rc = idxd_register_dma_device(idxd); 152 if (rc < 0) { 153 module_put(THIS_MODULE); 154 dev_dbg(dev, "Failed to register dmaengine device\n"); 155 return rc; 156 } 157 return 0; 158 } else if (is_idxd_wq_dev(dev)) { 159 struct idxd_wq *wq = confdev_to_wq(dev); 160 struct idxd_device *idxd = wq->idxd; 161 162 mutex_lock(&wq->wq_lock); 163 164 if (idxd->state != IDXD_DEV_ENABLED) { 165 mutex_unlock(&wq->wq_lock); 166 dev_warn(dev, "Enabling while device not enabled.\n"); 167 return -EPERM; 168 } 169 170 if (wq->state != IDXD_WQ_DISABLED) { 171 mutex_unlock(&wq->wq_lock); 172 dev_warn(dev, "WQ %d already enabled.\n", wq->id); 173 return -EBUSY; 174 } 175 176 if (!wq->group) { 177 mutex_unlock(&wq->wq_lock); 178 dev_warn(dev, "WQ not attached to group.\n"); 179 return -EINVAL; 180 } 181 182 if (strlen(wq->name) == 0) { 183 mutex_unlock(&wq->wq_lock); 184 dev_warn(dev, "WQ name not set.\n"); 185 return -EINVAL; 186 } 187 188 /* Shared WQ checks */ 189 if (wq_shared(wq)) { 190 if (!device_swq_supported(idxd)) { 191 dev_warn(dev, 192 "PASID not enabled and shared WQ.\n"); 193 mutex_unlock(&wq->wq_lock); 194 return -ENXIO; 195 } 196 /* 197 * Shared wq with the threshold set to 0 means the user 198 * did not set the threshold or transitioned from a 199 * dedicated wq but did not set threshold. A value 200 * of 0 would effectively disable the shared wq. The 201 * driver does not allow a value of 0 to be set for 202 * threshold via sysfs. 203 */ 204 if (wq->threshold == 0) { 205 dev_warn(dev, 206 "Shared WQ and threshold 0.\n"); 207 mutex_unlock(&wq->wq_lock); 208 return -EINVAL; 209 } 210 } 211 212 rc = idxd_wq_alloc_resources(wq); 213 if (rc < 0) { 214 mutex_unlock(&wq->wq_lock); 215 dev_warn(dev, "WQ resource alloc failed\n"); 216 return rc; 217 } 218 219 spin_lock_irqsave(&idxd->dev_lock, flags); 220 rc = idxd_device_config(idxd); 221 spin_unlock_irqrestore(&idxd->dev_lock, flags); 222 if (rc < 0) { 223 mutex_unlock(&wq->wq_lock); 224 dev_warn(dev, "Writing WQ %d config failed: %d\n", 225 wq->id, rc); 226 return rc; 227 } 228 229 rc = idxd_wq_enable(wq); 230 if (rc < 0) { 231 mutex_unlock(&wq->wq_lock); 232 dev_warn(dev, "WQ %d enabling failed: %d\n", 233 wq->id, rc); 234 return rc; 235 } 236 237 rc = idxd_wq_map_portal(wq); 238 if (rc < 0) { 239 dev_warn(dev, "wq portal mapping failed: %d\n", rc); 240 rc = idxd_wq_disable(wq); 241 if (rc < 0) 242 dev_warn(dev, "IDXD wq disable failed\n"); 243 mutex_unlock(&wq->wq_lock); 244 return rc; 245 } 246 247 wq->client_count = 0; 248 249 dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev)); 250 251 if (is_idxd_wq_dmaengine(wq)) { 252 rc = idxd_register_dma_channel(wq); 253 if (rc < 0) { 254 dev_dbg(dev, "DMA channel register failed\n"); 255 mutex_unlock(&wq->wq_lock); 256 return rc; 257 } 258 } else if (is_idxd_wq_cdev(wq)) { 259 rc = idxd_wq_add_cdev(wq); 260 if (rc < 0) { 261 dev_dbg(dev, "Cdev creation failed\n"); 262 mutex_unlock(&wq->wq_lock); 263 return rc; 264 } 265 } 266 267 mutex_unlock(&wq->wq_lock); 268 return 0; 269 } 270 271 return -ENODEV; 272 } 273 274 static void disable_wq(struct idxd_wq *wq) 275 { 276 struct idxd_device *idxd = wq->idxd; 277 struct device *dev = &idxd->pdev->dev; 278 279 mutex_lock(&wq->wq_lock); 280 dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev)); 281 if (wq->state == IDXD_WQ_DISABLED) { 282 mutex_unlock(&wq->wq_lock); 283 return; 284 } 285 286 if (is_idxd_wq_dmaengine(wq)) 287 idxd_unregister_dma_channel(wq); 288 else if (is_idxd_wq_cdev(wq)) 289 idxd_wq_del_cdev(wq); 290 291 if (idxd_wq_refcount(wq)) 292 dev_warn(dev, "Clients has claim on wq %d: %d\n", 293 wq->id, idxd_wq_refcount(wq)); 294 295 idxd_wq_unmap_portal(wq); 296 297 idxd_wq_drain(wq); 298 idxd_wq_reset(wq); 299 300 idxd_wq_free_resources(wq); 301 wq->client_count = 0; 302 mutex_unlock(&wq->wq_lock); 303 304 dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev)); 305 } 306 307 static int idxd_config_bus_remove(struct device *dev) 308 { 309 int rc; 310 311 dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev)); 312 313 /* disable workqueue here */ 314 if (is_idxd_wq_dev(dev)) { 315 struct idxd_wq *wq = confdev_to_wq(dev); 316 317 disable_wq(wq); 318 } else if (is_idxd_dev(dev)) { 319 struct idxd_device *idxd = confdev_to_idxd(dev); 320 int i; 321 322 dev_dbg(dev, "%s removing dev %s\n", __func__, 323 dev_name(&idxd->conf_dev)); 324 for (i = 0; i < idxd->max_wqs; i++) { 325 struct idxd_wq *wq = &idxd->wqs[i]; 326 327 if (wq->state == IDXD_WQ_DISABLED) 328 continue; 329 dev_warn(dev, "Active wq %d on disable %s.\n", i, 330 dev_name(&idxd->conf_dev)); 331 device_release_driver(&wq->conf_dev); 332 } 333 334 idxd_unregister_dma_device(idxd); 335 rc = idxd_device_disable(idxd); 336 for (i = 0; i < idxd->max_wqs; i++) { 337 struct idxd_wq *wq = &idxd->wqs[i]; 338 339 mutex_lock(&wq->wq_lock); 340 idxd_wq_disable_cleanup(wq); 341 mutex_unlock(&wq->wq_lock); 342 } 343 module_put(THIS_MODULE); 344 if (rc < 0) 345 dev_warn(dev, "Device disable failed\n"); 346 else 347 dev_info(dev, "Device %s disabled\n", dev_name(dev)); 348 349 } 350 351 return 0; 352 } 353 354 static void idxd_config_bus_shutdown(struct device *dev) 355 { 356 dev_dbg(dev, "%s called\n", __func__); 357 } 358 359 struct bus_type dsa_bus_type = { 360 .name = "dsa", 361 .match = idxd_config_bus_match, 362 .probe = idxd_config_bus_probe, 363 .remove = idxd_config_bus_remove, 364 .shutdown = idxd_config_bus_shutdown, 365 }; 366 367 struct bus_type iax_bus_type = { 368 .name = "iax", 369 .match = idxd_config_bus_match, 370 .probe = idxd_config_bus_probe, 371 .remove = idxd_config_bus_remove, 372 .shutdown = idxd_config_bus_shutdown, 373 }; 374 375 static struct bus_type *idxd_bus_types[] = { 376 &dsa_bus_type, 377 &iax_bus_type 378 }; 379 380 static struct idxd_device_driver dsa_drv = { 381 .drv = { 382 .name = "dsa", 383 .bus = &dsa_bus_type, 384 .owner = THIS_MODULE, 385 .mod_name = KBUILD_MODNAME, 386 }, 387 }; 388 389 static struct idxd_device_driver iax_drv = { 390 .drv = { 391 .name = "iax", 392 .bus = &iax_bus_type, 393 .owner = THIS_MODULE, 394 .mod_name = KBUILD_MODNAME, 395 }, 396 }; 397 398 static struct idxd_device_driver *idxd_drvs[] = { 399 &dsa_drv, 400 &iax_drv 401 }; 402 403 struct bus_type *idxd_get_bus_type(struct idxd_device *idxd) 404 { 405 return idxd_bus_types[idxd->type]; 406 } 407 408 static struct device_type *idxd_get_device_type(struct idxd_device *idxd) 409 { 410 if (idxd->type == IDXD_TYPE_DSA) 411 return &dsa_device_type; 412 else if (idxd->type == IDXD_TYPE_IAX) 413 return &iax_device_type; 414 else 415 return NULL; 416 } 417 418 /* IDXD generic driver setup */ 419 int idxd_register_driver(void) 420 { 421 int i, rc; 422 423 for (i = 0; i < IDXD_TYPE_MAX; i++) { 424 rc = driver_register(&idxd_drvs[i]->drv); 425 if (rc < 0) 426 goto drv_fail; 427 } 428 429 return 0; 430 431 drv_fail: 432 while (--i >= 0) 433 driver_unregister(&idxd_drvs[i]->drv); 434 return rc; 435 } 436 437 void idxd_unregister_driver(void) 438 { 439 int i; 440 441 for (i = 0; i < IDXD_TYPE_MAX; i++) 442 driver_unregister(&idxd_drvs[i]->drv); 443 } 444 445 /* IDXD engine attributes */ 446 static ssize_t engine_group_id_show(struct device *dev, 447 struct device_attribute *attr, char *buf) 448 { 449 struct idxd_engine *engine = 450 container_of(dev, struct idxd_engine, conf_dev); 451 452 if (engine->group) 453 return sprintf(buf, "%d\n", engine->group->id); 454 else 455 return sprintf(buf, "%d\n", -1); 456 } 457 458 static ssize_t engine_group_id_store(struct device *dev, 459 struct device_attribute *attr, 460 const char *buf, size_t count) 461 { 462 struct idxd_engine *engine = 463 container_of(dev, struct idxd_engine, conf_dev); 464 struct idxd_device *idxd = engine->idxd; 465 long id; 466 int rc; 467 struct idxd_group *prevg; 468 469 rc = kstrtol(buf, 10, &id); 470 if (rc < 0) 471 return -EINVAL; 472 473 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 474 return -EPERM; 475 476 if (id > idxd->max_groups - 1 || id < -1) 477 return -EINVAL; 478 479 if (id == -1) { 480 if (engine->group) { 481 engine->group->num_engines--; 482 engine->group = NULL; 483 } 484 return count; 485 } 486 487 prevg = engine->group; 488 489 if (prevg) 490 prevg->num_engines--; 491 engine->group = &idxd->groups[id]; 492 engine->group->num_engines++; 493 494 return count; 495 } 496 497 static struct device_attribute dev_attr_engine_group = 498 __ATTR(group_id, 0644, engine_group_id_show, 499 engine_group_id_store); 500 501 static struct attribute *idxd_engine_attributes[] = { 502 &dev_attr_engine_group.attr, 503 NULL, 504 }; 505 506 static const struct attribute_group idxd_engine_attribute_group = { 507 .attrs = idxd_engine_attributes, 508 }; 509 510 static const struct attribute_group *idxd_engine_attribute_groups[] = { 511 &idxd_engine_attribute_group, 512 NULL, 513 }; 514 515 /* Group attributes */ 516 517 static void idxd_set_free_tokens(struct idxd_device *idxd) 518 { 519 int i, tokens; 520 521 for (i = 0, tokens = 0; i < idxd->max_groups; i++) { 522 struct idxd_group *g = &idxd->groups[i]; 523 524 tokens += g->tokens_reserved; 525 } 526 527 idxd->nr_tokens = idxd->max_tokens - tokens; 528 } 529 530 static ssize_t group_tokens_reserved_show(struct device *dev, 531 struct device_attribute *attr, 532 char *buf) 533 { 534 struct idxd_group *group = 535 container_of(dev, struct idxd_group, conf_dev); 536 537 return sprintf(buf, "%u\n", group->tokens_reserved); 538 } 539 540 static ssize_t group_tokens_reserved_store(struct device *dev, 541 struct device_attribute *attr, 542 const char *buf, size_t count) 543 { 544 struct idxd_group *group = 545 container_of(dev, struct idxd_group, conf_dev); 546 struct idxd_device *idxd = group->idxd; 547 unsigned long val; 548 int rc; 549 550 rc = kstrtoul(buf, 10, &val); 551 if (rc < 0) 552 return -EINVAL; 553 554 if (idxd->type == IDXD_TYPE_IAX) 555 return -EOPNOTSUPP; 556 557 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 558 return -EPERM; 559 560 if (idxd->state == IDXD_DEV_ENABLED) 561 return -EPERM; 562 563 if (val > idxd->max_tokens) 564 return -EINVAL; 565 566 if (val > idxd->nr_tokens + group->tokens_reserved) 567 return -EINVAL; 568 569 group->tokens_reserved = val; 570 idxd_set_free_tokens(idxd); 571 return count; 572 } 573 574 static struct device_attribute dev_attr_group_tokens_reserved = 575 __ATTR(tokens_reserved, 0644, group_tokens_reserved_show, 576 group_tokens_reserved_store); 577 578 static ssize_t group_tokens_allowed_show(struct device *dev, 579 struct device_attribute *attr, 580 char *buf) 581 { 582 struct idxd_group *group = 583 container_of(dev, struct idxd_group, conf_dev); 584 585 return sprintf(buf, "%u\n", group->tokens_allowed); 586 } 587 588 static ssize_t group_tokens_allowed_store(struct device *dev, 589 struct device_attribute *attr, 590 const char *buf, size_t count) 591 { 592 struct idxd_group *group = 593 container_of(dev, struct idxd_group, conf_dev); 594 struct idxd_device *idxd = group->idxd; 595 unsigned long val; 596 int rc; 597 598 rc = kstrtoul(buf, 10, &val); 599 if (rc < 0) 600 return -EINVAL; 601 602 if (idxd->type == IDXD_TYPE_IAX) 603 return -EOPNOTSUPP; 604 605 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 606 return -EPERM; 607 608 if (idxd->state == IDXD_DEV_ENABLED) 609 return -EPERM; 610 611 if (val < 4 * group->num_engines || 612 val > group->tokens_reserved + idxd->nr_tokens) 613 return -EINVAL; 614 615 group->tokens_allowed = val; 616 return count; 617 } 618 619 static struct device_attribute dev_attr_group_tokens_allowed = 620 __ATTR(tokens_allowed, 0644, group_tokens_allowed_show, 621 group_tokens_allowed_store); 622 623 static ssize_t group_use_token_limit_show(struct device *dev, 624 struct device_attribute *attr, 625 char *buf) 626 { 627 struct idxd_group *group = 628 container_of(dev, struct idxd_group, conf_dev); 629 630 return sprintf(buf, "%u\n", group->use_token_limit); 631 } 632 633 static ssize_t group_use_token_limit_store(struct device *dev, 634 struct device_attribute *attr, 635 const char *buf, size_t count) 636 { 637 struct idxd_group *group = 638 container_of(dev, struct idxd_group, conf_dev); 639 struct idxd_device *idxd = group->idxd; 640 unsigned long val; 641 int rc; 642 643 rc = kstrtoul(buf, 10, &val); 644 if (rc < 0) 645 return -EINVAL; 646 647 if (idxd->type == IDXD_TYPE_IAX) 648 return -EOPNOTSUPP; 649 650 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 651 return -EPERM; 652 653 if (idxd->state == IDXD_DEV_ENABLED) 654 return -EPERM; 655 656 if (idxd->token_limit == 0) 657 return -EPERM; 658 659 group->use_token_limit = !!val; 660 return count; 661 } 662 663 static struct device_attribute dev_attr_group_use_token_limit = 664 __ATTR(use_token_limit, 0644, group_use_token_limit_show, 665 group_use_token_limit_store); 666 667 static ssize_t group_engines_show(struct device *dev, 668 struct device_attribute *attr, char *buf) 669 { 670 struct idxd_group *group = 671 container_of(dev, struct idxd_group, conf_dev); 672 int i, rc = 0; 673 char *tmp = buf; 674 struct idxd_device *idxd = group->idxd; 675 676 for (i = 0; i < idxd->max_engines; i++) { 677 struct idxd_engine *engine = &idxd->engines[i]; 678 679 if (!engine->group) 680 continue; 681 682 if (engine->group->id == group->id) 683 rc += sprintf(tmp + rc, "engine%d.%d ", 684 idxd->id, engine->id); 685 } 686 687 rc--; 688 rc += sprintf(tmp + rc, "\n"); 689 690 return rc; 691 } 692 693 static struct device_attribute dev_attr_group_engines = 694 __ATTR(engines, 0444, group_engines_show, NULL); 695 696 static ssize_t group_work_queues_show(struct device *dev, 697 struct device_attribute *attr, char *buf) 698 { 699 struct idxd_group *group = 700 container_of(dev, struct idxd_group, conf_dev); 701 int i, rc = 0; 702 char *tmp = buf; 703 struct idxd_device *idxd = group->idxd; 704 705 for (i = 0; i < idxd->max_wqs; i++) { 706 struct idxd_wq *wq = &idxd->wqs[i]; 707 708 if (!wq->group) 709 continue; 710 711 if (wq->group->id == group->id) 712 rc += sprintf(tmp + rc, "wq%d.%d ", 713 idxd->id, wq->id); 714 } 715 716 rc--; 717 rc += sprintf(tmp + rc, "\n"); 718 719 return rc; 720 } 721 722 static struct device_attribute dev_attr_group_work_queues = 723 __ATTR(work_queues, 0444, group_work_queues_show, NULL); 724 725 static ssize_t group_traffic_class_a_show(struct device *dev, 726 struct device_attribute *attr, 727 char *buf) 728 { 729 struct idxd_group *group = 730 container_of(dev, struct idxd_group, conf_dev); 731 732 return sprintf(buf, "%d\n", group->tc_a); 733 } 734 735 static ssize_t group_traffic_class_a_store(struct device *dev, 736 struct device_attribute *attr, 737 const char *buf, size_t count) 738 { 739 struct idxd_group *group = 740 container_of(dev, struct idxd_group, conf_dev); 741 struct idxd_device *idxd = group->idxd; 742 long val; 743 int rc; 744 745 rc = kstrtol(buf, 10, &val); 746 if (rc < 0) 747 return -EINVAL; 748 749 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 750 return -EPERM; 751 752 if (idxd->state == IDXD_DEV_ENABLED) 753 return -EPERM; 754 755 if (val < 0 || val > 7) 756 return -EINVAL; 757 758 group->tc_a = val; 759 return count; 760 } 761 762 static struct device_attribute dev_attr_group_traffic_class_a = 763 __ATTR(traffic_class_a, 0644, group_traffic_class_a_show, 764 group_traffic_class_a_store); 765 766 static ssize_t group_traffic_class_b_show(struct device *dev, 767 struct device_attribute *attr, 768 char *buf) 769 { 770 struct idxd_group *group = 771 container_of(dev, struct idxd_group, conf_dev); 772 773 return sprintf(buf, "%d\n", group->tc_b); 774 } 775 776 static ssize_t group_traffic_class_b_store(struct device *dev, 777 struct device_attribute *attr, 778 const char *buf, size_t count) 779 { 780 struct idxd_group *group = 781 container_of(dev, struct idxd_group, conf_dev); 782 struct idxd_device *idxd = group->idxd; 783 long val; 784 int rc; 785 786 rc = kstrtol(buf, 10, &val); 787 if (rc < 0) 788 return -EINVAL; 789 790 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 791 return -EPERM; 792 793 if (idxd->state == IDXD_DEV_ENABLED) 794 return -EPERM; 795 796 if (val < 0 || val > 7) 797 return -EINVAL; 798 799 group->tc_b = val; 800 return count; 801 } 802 803 static struct device_attribute dev_attr_group_traffic_class_b = 804 __ATTR(traffic_class_b, 0644, group_traffic_class_b_show, 805 group_traffic_class_b_store); 806 807 static struct attribute *idxd_group_attributes[] = { 808 &dev_attr_group_work_queues.attr, 809 &dev_attr_group_engines.attr, 810 &dev_attr_group_use_token_limit.attr, 811 &dev_attr_group_tokens_allowed.attr, 812 &dev_attr_group_tokens_reserved.attr, 813 &dev_attr_group_traffic_class_a.attr, 814 &dev_attr_group_traffic_class_b.attr, 815 NULL, 816 }; 817 818 static const struct attribute_group idxd_group_attribute_group = { 819 .attrs = idxd_group_attributes, 820 }; 821 822 static const struct attribute_group *idxd_group_attribute_groups[] = { 823 &idxd_group_attribute_group, 824 NULL, 825 }; 826 827 /* IDXD work queue attribs */ 828 static ssize_t wq_clients_show(struct device *dev, 829 struct device_attribute *attr, char *buf) 830 { 831 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 832 833 return sprintf(buf, "%d\n", wq->client_count); 834 } 835 836 static struct device_attribute dev_attr_wq_clients = 837 __ATTR(clients, 0444, wq_clients_show, NULL); 838 839 static ssize_t wq_state_show(struct device *dev, 840 struct device_attribute *attr, char *buf) 841 { 842 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 843 844 switch (wq->state) { 845 case IDXD_WQ_DISABLED: 846 return sprintf(buf, "disabled\n"); 847 case IDXD_WQ_ENABLED: 848 return sprintf(buf, "enabled\n"); 849 } 850 851 return sprintf(buf, "unknown\n"); 852 } 853 854 static struct device_attribute dev_attr_wq_state = 855 __ATTR(state, 0444, wq_state_show, NULL); 856 857 static ssize_t wq_group_id_show(struct device *dev, 858 struct device_attribute *attr, char *buf) 859 { 860 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 861 862 if (wq->group) 863 return sprintf(buf, "%u\n", wq->group->id); 864 else 865 return sprintf(buf, "-1\n"); 866 } 867 868 static ssize_t wq_group_id_store(struct device *dev, 869 struct device_attribute *attr, 870 const char *buf, size_t count) 871 { 872 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 873 struct idxd_device *idxd = wq->idxd; 874 long id; 875 int rc; 876 struct idxd_group *prevg, *group; 877 878 rc = kstrtol(buf, 10, &id); 879 if (rc < 0) 880 return -EINVAL; 881 882 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 883 return -EPERM; 884 885 if (wq->state != IDXD_WQ_DISABLED) 886 return -EPERM; 887 888 if (id > idxd->max_groups - 1 || id < -1) 889 return -EINVAL; 890 891 if (id == -1) { 892 if (wq->group) { 893 wq->group->num_wqs--; 894 wq->group = NULL; 895 } 896 return count; 897 } 898 899 group = &idxd->groups[id]; 900 prevg = wq->group; 901 902 if (prevg) 903 prevg->num_wqs--; 904 wq->group = group; 905 group->num_wqs++; 906 return count; 907 } 908 909 static struct device_attribute dev_attr_wq_group_id = 910 __ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store); 911 912 static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr, 913 char *buf) 914 { 915 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 916 917 return sprintf(buf, "%s\n", 918 wq_dedicated(wq) ? "dedicated" : "shared"); 919 } 920 921 static ssize_t wq_mode_store(struct device *dev, 922 struct device_attribute *attr, const char *buf, 923 size_t count) 924 { 925 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 926 struct idxd_device *idxd = wq->idxd; 927 928 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 929 return -EPERM; 930 931 if (wq->state != IDXD_WQ_DISABLED) 932 return -EPERM; 933 934 if (sysfs_streq(buf, "dedicated")) { 935 set_bit(WQ_FLAG_DEDICATED, &wq->flags); 936 wq->threshold = 0; 937 } else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) { 938 clear_bit(WQ_FLAG_DEDICATED, &wq->flags); 939 } else { 940 return -EINVAL; 941 } 942 943 return count; 944 } 945 946 static struct device_attribute dev_attr_wq_mode = 947 __ATTR(mode, 0644, wq_mode_show, wq_mode_store); 948 949 static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr, 950 char *buf) 951 { 952 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 953 954 return sprintf(buf, "%u\n", wq->size); 955 } 956 957 static int total_claimed_wq_size(struct idxd_device *idxd) 958 { 959 int i; 960 int wq_size = 0; 961 962 for (i = 0; i < idxd->max_wqs; i++) { 963 struct idxd_wq *wq = &idxd->wqs[i]; 964 965 wq_size += wq->size; 966 } 967 968 return wq_size; 969 } 970 971 static ssize_t wq_size_store(struct device *dev, 972 struct device_attribute *attr, const char *buf, 973 size_t count) 974 { 975 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 976 unsigned long size; 977 struct idxd_device *idxd = wq->idxd; 978 int rc; 979 980 rc = kstrtoul(buf, 10, &size); 981 if (rc < 0) 982 return -EINVAL; 983 984 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 985 return -EPERM; 986 987 if (idxd->state == IDXD_DEV_ENABLED) 988 return -EPERM; 989 990 if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size) 991 return -EINVAL; 992 993 wq->size = size; 994 return count; 995 } 996 997 static struct device_attribute dev_attr_wq_size = 998 __ATTR(size, 0644, wq_size_show, wq_size_store); 999 1000 static ssize_t wq_priority_show(struct device *dev, 1001 struct device_attribute *attr, char *buf) 1002 { 1003 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1004 1005 return sprintf(buf, "%u\n", wq->priority); 1006 } 1007 1008 static ssize_t wq_priority_store(struct device *dev, 1009 struct device_attribute *attr, 1010 const char *buf, size_t count) 1011 { 1012 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1013 unsigned long prio; 1014 struct idxd_device *idxd = wq->idxd; 1015 int rc; 1016 1017 rc = kstrtoul(buf, 10, &prio); 1018 if (rc < 0) 1019 return -EINVAL; 1020 1021 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 1022 return -EPERM; 1023 1024 if (wq->state != IDXD_WQ_DISABLED) 1025 return -EPERM; 1026 1027 if (prio > IDXD_MAX_PRIORITY) 1028 return -EINVAL; 1029 1030 wq->priority = prio; 1031 return count; 1032 } 1033 1034 static struct device_attribute dev_attr_wq_priority = 1035 __ATTR(priority, 0644, wq_priority_show, wq_priority_store); 1036 1037 static ssize_t wq_block_on_fault_show(struct device *dev, 1038 struct device_attribute *attr, char *buf) 1039 { 1040 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1041 1042 return sprintf(buf, "%u\n", 1043 test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags)); 1044 } 1045 1046 static ssize_t wq_block_on_fault_store(struct device *dev, 1047 struct device_attribute *attr, 1048 const char *buf, size_t count) 1049 { 1050 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1051 struct idxd_device *idxd = wq->idxd; 1052 bool bof; 1053 int rc; 1054 1055 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 1056 return -EPERM; 1057 1058 if (wq->state != IDXD_WQ_DISABLED) 1059 return -ENXIO; 1060 1061 rc = kstrtobool(buf, &bof); 1062 if (rc < 0) 1063 return rc; 1064 1065 if (bof) 1066 set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags); 1067 else 1068 clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags); 1069 1070 return count; 1071 } 1072 1073 static struct device_attribute dev_attr_wq_block_on_fault = 1074 __ATTR(block_on_fault, 0644, wq_block_on_fault_show, 1075 wq_block_on_fault_store); 1076 1077 static ssize_t wq_threshold_show(struct device *dev, 1078 struct device_attribute *attr, char *buf) 1079 { 1080 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1081 1082 return sprintf(buf, "%u\n", wq->threshold); 1083 } 1084 1085 static ssize_t wq_threshold_store(struct device *dev, 1086 struct device_attribute *attr, 1087 const char *buf, size_t count) 1088 { 1089 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1090 struct idxd_device *idxd = wq->idxd; 1091 unsigned int val; 1092 int rc; 1093 1094 rc = kstrtouint(buf, 0, &val); 1095 if (rc < 0) 1096 return -EINVAL; 1097 1098 if (val > wq->size || val <= 0) 1099 return -EINVAL; 1100 1101 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 1102 return -EPERM; 1103 1104 if (wq->state != IDXD_WQ_DISABLED) 1105 return -ENXIO; 1106 1107 if (test_bit(WQ_FLAG_DEDICATED, &wq->flags)) 1108 return -EINVAL; 1109 1110 wq->threshold = val; 1111 1112 return count; 1113 } 1114 1115 static struct device_attribute dev_attr_wq_threshold = 1116 __ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store); 1117 1118 static ssize_t wq_type_show(struct device *dev, 1119 struct device_attribute *attr, char *buf) 1120 { 1121 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1122 1123 switch (wq->type) { 1124 case IDXD_WQT_KERNEL: 1125 return sprintf(buf, "%s\n", 1126 idxd_wq_type_names[IDXD_WQT_KERNEL]); 1127 case IDXD_WQT_USER: 1128 return sprintf(buf, "%s\n", 1129 idxd_wq_type_names[IDXD_WQT_USER]); 1130 case IDXD_WQT_NONE: 1131 default: 1132 return sprintf(buf, "%s\n", 1133 idxd_wq_type_names[IDXD_WQT_NONE]); 1134 } 1135 1136 return -EINVAL; 1137 } 1138 1139 static ssize_t wq_type_store(struct device *dev, 1140 struct device_attribute *attr, const char *buf, 1141 size_t count) 1142 { 1143 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1144 enum idxd_wq_type old_type; 1145 1146 if (wq->state != IDXD_WQ_DISABLED) 1147 return -EPERM; 1148 1149 old_type = wq->type; 1150 if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE])) 1151 wq->type = IDXD_WQT_NONE; 1152 else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL])) 1153 wq->type = IDXD_WQT_KERNEL; 1154 else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER])) 1155 wq->type = IDXD_WQT_USER; 1156 else 1157 return -EINVAL; 1158 1159 /* If we are changing queue type, clear the name */ 1160 if (wq->type != old_type) 1161 memset(wq->name, 0, WQ_NAME_SIZE + 1); 1162 1163 return count; 1164 } 1165 1166 static struct device_attribute dev_attr_wq_type = 1167 __ATTR(type, 0644, wq_type_show, wq_type_store); 1168 1169 static ssize_t wq_name_show(struct device *dev, 1170 struct device_attribute *attr, char *buf) 1171 { 1172 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1173 1174 return sprintf(buf, "%s\n", wq->name); 1175 } 1176 1177 static ssize_t wq_name_store(struct device *dev, 1178 struct device_attribute *attr, const char *buf, 1179 size_t count) 1180 { 1181 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1182 1183 if (wq->state != IDXD_WQ_DISABLED) 1184 return -EPERM; 1185 1186 if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0) 1187 return -EINVAL; 1188 1189 /* 1190 * This is temporarily placed here until we have SVM support for 1191 * dmaengine. 1192 */ 1193 if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd)) 1194 return -EOPNOTSUPP; 1195 1196 memset(wq->name, 0, WQ_NAME_SIZE + 1); 1197 strncpy(wq->name, buf, WQ_NAME_SIZE); 1198 strreplace(wq->name, '\n', '\0'); 1199 return count; 1200 } 1201 1202 static struct device_attribute dev_attr_wq_name = 1203 __ATTR(name, 0644, wq_name_show, wq_name_store); 1204 1205 static ssize_t wq_cdev_minor_show(struct device *dev, 1206 struct device_attribute *attr, char *buf) 1207 { 1208 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1209 1210 return sprintf(buf, "%d\n", wq->idxd_cdev.minor); 1211 } 1212 1213 static struct device_attribute dev_attr_wq_cdev_minor = 1214 __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL); 1215 1216 static int __get_sysfs_u64(const char *buf, u64 *val) 1217 { 1218 int rc; 1219 1220 rc = kstrtou64(buf, 0, val); 1221 if (rc < 0) 1222 return -EINVAL; 1223 1224 if (*val == 0) 1225 return -EINVAL; 1226 1227 *val = roundup_pow_of_two(*val); 1228 return 0; 1229 } 1230 1231 static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr, 1232 char *buf) 1233 { 1234 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1235 1236 return sprintf(buf, "%llu\n", wq->max_xfer_bytes); 1237 } 1238 1239 static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr, 1240 const char *buf, size_t count) 1241 { 1242 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1243 struct idxd_device *idxd = wq->idxd; 1244 u64 xfer_size; 1245 int rc; 1246 1247 if (wq->state != IDXD_WQ_DISABLED) 1248 return -EPERM; 1249 1250 rc = __get_sysfs_u64(buf, &xfer_size); 1251 if (rc < 0) 1252 return rc; 1253 1254 if (xfer_size > idxd->max_xfer_bytes) 1255 return -EINVAL; 1256 1257 wq->max_xfer_bytes = xfer_size; 1258 1259 return count; 1260 } 1261 1262 static struct device_attribute dev_attr_wq_max_transfer_size = 1263 __ATTR(max_transfer_size, 0644, 1264 wq_max_transfer_size_show, wq_max_transfer_size_store); 1265 1266 static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf) 1267 { 1268 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1269 1270 return sprintf(buf, "%u\n", wq->max_batch_size); 1271 } 1272 1273 static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr, 1274 const char *buf, size_t count) 1275 { 1276 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1277 struct idxd_device *idxd = wq->idxd; 1278 u64 batch_size; 1279 int rc; 1280 1281 if (wq->state != IDXD_WQ_DISABLED) 1282 return -EPERM; 1283 1284 rc = __get_sysfs_u64(buf, &batch_size); 1285 if (rc < 0) 1286 return rc; 1287 1288 if (batch_size > idxd->max_batch_size) 1289 return -EINVAL; 1290 1291 wq->max_batch_size = (u32)batch_size; 1292 1293 return count; 1294 } 1295 1296 static struct device_attribute dev_attr_wq_max_batch_size = 1297 __ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store); 1298 1299 static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf) 1300 { 1301 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1302 1303 return sprintf(buf, "%u\n", wq->ats_dis); 1304 } 1305 1306 static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr, 1307 const char *buf, size_t count) 1308 { 1309 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1310 struct idxd_device *idxd = wq->idxd; 1311 bool ats_dis; 1312 int rc; 1313 1314 if (wq->state != IDXD_WQ_DISABLED) 1315 return -EPERM; 1316 1317 if (!idxd->hw.wq_cap.wq_ats_support) 1318 return -EOPNOTSUPP; 1319 1320 rc = kstrtobool(buf, &ats_dis); 1321 if (rc < 0) 1322 return rc; 1323 1324 wq->ats_dis = ats_dis; 1325 1326 return count; 1327 } 1328 1329 static struct device_attribute dev_attr_wq_ats_disable = 1330 __ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store); 1331 1332 static struct attribute *idxd_wq_attributes[] = { 1333 &dev_attr_wq_clients.attr, 1334 &dev_attr_wq_state.attr, 1335 &dev_attr_wq_group_id.attr, 1336 &dev_attr_wq_mode.attr, 1337 &dev_attr_wq_size.attr, 1338 &dev_attr_wq_priority.attr, 1339 &dev_attr_wq_block_on_fault.attr, 1340 &dev_attr_wq_threshold.attr, 1341 &dev_attr_wq_type.attr, 1342 &dev_attr_wq_name.attr, 1343 &dev_attr_wq_cdev_minor.attr, 1344 &dev_attr_wq_max_transfer_size.attr, 1345 &dev_attr_wq_max_batch_size.attr, 1346 &dev_attr_wq_ats_disable.attr, 1347 NULL, 1348 }; 1349 1350 static const struct attribute_group idxd_wq_attribute_group = { 1351 .attrs = idxd_wq_attributes, 1352 }; 1353 1354 static const struct attribute_group *idxd_wq_attribute_groups[] = { 1355 &idxd_wq_attribute_group, 1356 NULL, 1357 }; 1358 1359 /* IDXD device attribs */ 1360 static ssize_t version_show(struct device *dev, struct device_attribute *attr, 1361 char *buf) 1362 { 1363 struct idxd_device *idxd = 1364 container_of(dev, struct idxd_device, conf_dev); 1365 1366 return sprintf(buf, "%#x\n", idxd->hw.version); 1367 } 1368 static DEVICE_ATTR_RO(version); 1369 1370 static ssize_t max_work_queues_size_show(struct device *dev, 1371 struct device_attribute *attr, 1372 char *buf) 1373 { 1374 struct idxd_device *idxd = 1375 container_of(dev, struct idxd_device, conf_dev); 1376 1377 return sprintf(buf, "%u\n", idxd->max_wq_size); 1378 } 1379 static DEVICE_ATTR_RO(max_work_queues_size); 1380 1381 static ssize_t max_groups_show(struct device *dev, 1382 struct device_attribute *attr, char *buf) 1383 { 1384 struct idxd_device *idxd = 1385 container_of(dev, struct idxd_device, conf_dev); 1386 1387 return sprintf(buf, "%u\n", idxd->max_groups); 1388 } 1389 static DEVICE_ATTR_RO(max_groups); 1390 1391 static ssize_t max_work_queues_show(struct device *dev, 1392 struct device_attribute *attr, char *buf) 1393 { 1394 struct idxd_device *idxd = 1395 container_of(dev, struct idxd_device, conf_dev); 1396 1397 return sprintf(buf, "%u\n", idxd->max_wqs); 1398 } 1399 static DEVICE_ATTR_RO(max_work_queues); 1400 1401 static ssize_t max_engines_show(struct device *dev, 1402 struct device_attribute *attr, char *buf) 1403 { 1404 struct idxd_device *idxd = 1405 container_of(dev, struct idxd_device, conf_dev); 1406 1407 return sprintf(buf, "%u\n", idxd->max_engines); 1408 } 1409 static DEVICE_ATTR_RO(max_engines); 1410 1411 static ssize_t numa_node_show(struct device *dev, 1412 struct device_attribute *attr, char *buf) 1413 { 1414 struct idxd_device *idxd = 1415 container_of(dev, struct idxd_device, conf_dev); 1416 1417 return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev)); 1418 } 1419 static DEVICE_ATTR_RO(numa_node); 1420 1421 static ssize_t max_batch_size_show(struct device *dev, 1422 struct device_attribute *attr, char *buf) 1423 { 1424 struct idxd_device *idxd = 1425 container_of(dev, struct idxd_device, conf_dev); 1426 1427 return sprintf(buf, "%u\n", idxd->max_batch_size); 1428 } 1429 static DEVICE_ATTR_RO(max_batch_size); 1430 1431 static ssize_t max_transfer_size_show(struct device *dev, 1432 struct device_attribute *attr, 1433 char *buf) 1434 { 1435 struct idxd_device *idxd = 1436 container_of(dev, struct idxd_device, conf_dev); 1437 1438 return sprintf(buf, "%llu\n", idxd->max_xfer_bytes); 1439 } 1440 static DEVICE_ATTR_RO(max_transfer_size); 1441 1442 static ssize_t op_cap_show(struct device *dev, 1443 struct device_attribute *attr, char *buf) 1444 { 1445 struct idxd_device *idxd = 1446 container_of(dev, struct idxd_device, conf_dev); 1447 int i, rc = 0; 1448 1449 for (i = 0; i < 4; i++) 1450 rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]); 1451 1452 rc--; 1453 rc += sysfs_emit_at(buf, rc, "\n"); 1454 return rc; 1455 } 1456 static DEVICE_ATTR_RO(op_cap); 1457 1458 static ssize_t gen_cap_show(struct device *dev, 1459 struct device_attribute *attr, char *buf) 1460 { 1461 struct idxd_device *idxd = 1462 container_of(dev, struct idxd_device, conf_dev); 1463 1464 return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits); 1465 } 1466 static DEVICE_ATTR_RO(gen_cap); 1467 1468 static ssize_t configurable_show(struct device *dev, 1469 struct device_attribute *attr, char *buf) 1470 { 1471 struct idxd_device *idxd = 1472 container_of(dev, struct idxd_device, conf_dev); 1473 1474 return sprintf(buf, "%u\n", 1475 test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)); 1476 } 1477 static DEVICE_ATTR_RO(configurable); 1478 1479 static ssize_t clients_show(struct device *dev, 1480 struct device_attribute *attr, char *buf) 1481 { 1482 struct idxd_device *idxd = 1483 container_of(dev, struct idxd_device, conf_dev); 1484 unsigned long flags; 1485 int count = 0, i; 1486 1487 spin_lock_irqsave(&idxd->dev_lock, flags); 1488 for (i = 0; i < idxd->max_wqs; i++) { 1489 struct idxd_wq *wq = &idxd->wqs[i]; 1490 1491 count += wq->client_count; 1492 } 1493 spin_unlock_irqrestore(&idxd->dev_lock, flags); 1494 1495 return sprintf(buf, "%d\n", count); 1496 } 1497 static DEVICE_ATTR_RO(clients); 1498 1499 static ssize_t pasid_enabled_show(struct device *dev, 1500 struct device_attribute *attr, char *buf) 1501 { 1502 struct idxd_device *idxd = 1503 container_of(dev, struct idxd_device, conf_dev); 1504 1505 return sprintf(buf, "%u\n", device_pasid_enabled(idxd)); 1506 } 1507 static DEVICE_ATTR_RO(pasid_enabled); 1508 1509 static ssize_t state_show(struct device *dev, 1510 struct device_attribute *attr, char *buf) 1511 { 1512 struct idxd_device *idxd = 1513 container_of(dev, struct idxd_device, conf_dev); 1514 1515 switch (idxd->state) { 1516 case IDXD_DEV_DISABLED: 1517 case IDXD_DEV_CONF_READY: 1518 return sprintf(buf, "disabled\n"); 1519 case IDXD_DEV_ENABLED: 1520 return sprintf(buf, "enabled\n"); 1521 case IDXD_DEV_HALTED: 1522 return sprintf(buf, "halted\n"); 1523 } 1524 1525 return sprintf(buf, "unknown\n"); 1526 } 1527 static DEVICE_ATTR_RO(state); 1528 1529 static ssize_t errors_show(struct device *dev, 1530 struct device_attribute *attr, char *buf) 1531 { 1532 struct idxd_device *idxd = 1533 container_of(dev, struct idxd_device, conf_dev); 1534 int i, out = 0; 1535 unsigned long flags; 1536 1537 spin_lock_irqsave(&idxd->dev_lock, flags); 1538 for (i = 0; i < 4; i++) 1539 out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]); 1540 spin_unlock_irqrestore(&idxd->dev_lock, flags); 1541 out--; 1542 out += sprintf(buf + out, "\n"); 1543 return out; 1544 } 1545 static DEVICE_ATTR_RO(errors); 1546 1547 static ssize_t max_tokens_show(struct device *dev, 1548 struct device_attribute *attr, char *buf) 1549 { 1550 struct idxd_device *idxd = 1551 container_of(dev, struct idxd_device, conf_dev); 1552 1553 return sprintf(buf, "%u\n", idxd->max_tokens); 1554 } 1555 static DEVICE_ATTR_RO(max_tokens); 1556 1557 static ssize_t token_limit_show(struct device *dev, 1558 struct device_attribute *attr, char *buf) 1559 { 1560 struct idxd_device *idxd = 1561 container_of(dev, struct idxd_device, conf_dev); 1562 1563 return sprintf(buf, "%u\n", idxd->token_limit); 1564 } 1565 1566 static ssize_t token_limit_store(struct device *dev, 1567 struct device_attribute *attr, 1568 const char *buf, size_t count) 1569 { 1570 struct idxd_device *idxd = 1571 container_of(dev, struct idxd_device, conf_dev); 1572 unsigned long val; 1573 int rc; 1574 1575 rc = kstrtoul(buf, 10, &val); 1576 if (rc < 0) 1577 return -EINVAL; 1578 1579 if (idxd->state == IDXD_DEV_ENABLED) 1580 return -EPERM; 1581 1582 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 1583 return -EPERM; 1584 1585 if (!idxd->hw.group_cap.token_limit) 1586 return -EPERM; 1587 1588 if (val > idxd->hw.group_cap.total_tokens) 1589 return -EINVAL; 1590 1591 idxd->token_limit = val; 1592 return count; 1593 } 1594 static DEVICE_ATTR_RW(token_limit); 1595 1596 static ssize_t cdev_major_show(struct device *dev, 1597 struct device_attribute *attr, char *buf) 1598 { 1599 struct idxd_device *idxd = 1600 container_of(dev, struct idxd_device, conf_dev); 1601 1602 return sprintf(buf, "%u\n", idxd->major); 1603 } 1604 static DEVICE_ATTR_RO(cdev_major); 1605 1606 static ssize_t cmd_status_show(struct device *dev, 1607 struct device_attribute *attr, char *buf) 1608 { 1609 struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); 1610 1611 return sprintf(buf, "%#x\n", idxd->cmd_status); 1612 } 1613 static DEVICE_ATTR_RO(cmd_status); 1614 1615 static struct attribute *idxd_device_attributes[] = { 1616 &dev_attr_version.attr, 1617 &dev_attr_max_groups.attr, 1618 &dev_attr_max_work_queues.attr, 1619 &dev_attr_max_work_queues_size.attr, 1620 &dev_attr_max_engines.attr, 1621 &dev_attr_numa_node.attr, 1622 &dev_attr_max_batch_size.attr, 1623 &dev_attr_max_transfer_size.attr, 1624 &dev_attr_op_cap.attr, 1625 &dev_attr_gen_cap.attr, 1626 &dev_attr_configurable.attr, 1627 &dev_attr_clients.attr, 1628 &dev_attr_pasid_enabled.attr, 1629 &dev_attr_state.attr, 1630 &dev_attr_errors.attr, 1631 &dev_attr_max_tokens.attr, 1632 &dev_attr_token_limit.attr, 1633 &dev_attr_cdev_major.attr, 1634 &dev_attr_cmd_status.attr, 1635 NULL, 1636 }; 1637 1638 static const struct attribute_group idxd_device_attribute_group = { 1639 .attrs = idxd_device_attributes, 1640 }; 1641 1642 static const struct attribute_group *idxd_attribute_groups[] = { 1643 &idxd_device_attribute_group, 1644 NULL, 1645 }; 1646 1647 static int idxd_setup_engine_sysfs(struct idxd_device *idxd) 1648 { 1649 struct device *dev = &idxd->pdev->dev; 1650 int i, rc; 1651 1652 for (i = 0; i < idxd->max_engines; i++) { 1653 struct idxd_engine *engine = &idxd->engines[i]; 1654 1655 engine->conf_dev.parent = &idxd->conf_dev; 1656 dev_set_name(&engine->conf_dev, "engine%d.%d", 1657 idxd->id, engine->id); 1658 engine->conf_dev.bus = idxd_get_bus_type(idxd); 1659 engine->conf_dev.groups = idxd_engine_attribute_groups; 1660 engine->conf_dev.type = &idxd_engine_device_type; 1661 dev_dbg(dev, "Engine device register: %s\n", 1662 dev_name(&engine->conf_dev)); 1663 rc = device_register(&engine->conf_dev); 1664 if (rc < 0) { 1665 put_device(&engine->conf_dev); 1666 goto cleanup; 1667 } 1668 } 1669 1670 return 0; 1671 1672 cleanup: 1673 while (i--) { 1674 struct idxd_engine *engine = &idxd->engines[i]; 1675 1676 device_unregister(&engine->conf_dev); 1677 } 1678 return rc; 1679 } 1680 1681 static int idxd_setup_group_sysfs(struct idxd_device *idxd) 1682 { 1683 struct device *dev = &idxd->pdev->dev; 1684 int i, rc; 1685 1686 for (i = 0; i < idxd->max_groups; i++) { 1687 struct idxd_group *group = &idxd->groups[i]; 1688 1689 group->conf_dev.parent = &idxd->conf_dev; 1690 dev_set_name(&group->conf_dev, "group%d.%d", 1691 idxd->id, group->id); 1692 group->conf_dev.bus = idxd_get_bus_type(idxd); 1693 group->conf_dev.groups = idxd_group_attribute_groups; 1694 group->conf_dev.type = &idxd_group_device_type; 1695 dev_dbg(dev, "Group device register: %s\n", 1696 dev_name(&group->conf_dev)); 1697 rc = device_register(&group->conf_dev); 1698 if (rc < 0) { 1699 put_device(&group->conf_dev); 1700 goto cleanup; 1701 } 1702 } 1703 1704 return 0; 1705 1706 cleanup: 1707 while (i--) { 1708 struct idxd_group *group = &idxd->groups[i]; 1709 1710 device_unregister(&group->conf_dev); 1711 } 1712 return rc; 1713 } 1714 1715 static int idxd_setup_wq_sysfs(struct idxd_device *idxd) 1716 { 1717 struct device *dev = &idxd->pdev->dev; 1718 int i, rc; 1719 1720 for (i = 0; i < idxd->max_wqs; i++) { 1721 struct idxd_wq *wq = &idxd->wqs[i]; 1722 1723 wq->conf_dev.parent = &idxd->conf_dev; 1724 dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id); 1725 wq->conf_dev.bus = idxd_get_bus_type(idxd); 1726 wq->conf_dev.groups = idxd_wq_attribute_groups; 1727 wq->conf_dev.type = &idxd_wq_device_type; 1728 dev_dbg(dev, "WQ device register: %s\n", 1729 dev_name(&wq->conf_dev)); 1730 rc = device_register(&wq->conf_dev); 1731 if (rc < 0) { 1732 put_device(&wq->conf_dev); 1733 goto cleanup; 1734 } 1735 } 1736 1737 return 0; 1738 1739 cleanup: 1740 while (i--) { 1741 struct idxd_wq *wq = &idxd->wqs[i]; 1742 1743 device_unregister(&wq->conf_dev); 1744 } 1745 return rc; 1746 } 1747 1748 static int idxd_setup_device_sysfs(struct idxd_device *idxd) 1749 { 1750 struct device *dev = &idxd->pdev->dev; 1751 int rc; 1752 char devname[IDXD_NAME_SIZE]; 1753 1754 sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id); 1755 idxd->conf_dev.parent = dev; 1756 dev_set_name(&idxd->conf_dev, "%s", devname); 1757 idxd->conf_dev.bus = idxd_get_bus_type(idxd); 1758 idxd->conf_dev.groups = idxd_attribute_groups; 1759 idxd->conf_dev.type = idxd_get_device_type(idxd); 1760 1761 dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev)); 1762 rc = device_register(&idxd->conf_dev); 1763 if (rc < 0) { 1764 put_device(&idxd->conf_dev); 1765 return rc; 1766 } 1767 1768 return 0; 1769 } 1770 1771 int idxd_setup_sysfs(struct idxd_device *idxd) 1772 { 1773 struct device *dev = &idxd->pdev->dev; 1774 int rc; 1775 1776 rc = idxd_setup_device_sysfs(idxd); 1777 if (rc < 0) { 1778 dev_dbg(dev, "Device sysfs registering failed: %d\n", rc); 1779 return rc; 1780 } 1781 1782 rc = idxd_setup_wq_sysfs(idxd); 1783 if (rc < 0) { 1784 /* unregister conf dev */ 1785 dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc); 1786 return rc; 1787 } 1788 1789 rc = idxd_setup_group_sysfs(idxd); 1790 if (rc < 0) { 1791 /* unregister conf dev */ 1792 dev_dbg(dev, "Group sysfs registering failed: %d\n", rc); 1793 return rc; 1794 } 1795 1796 rc = idxd_setup_engine_sysfs(idxd); 1797 if (rc < 0) { 1798 /* unregister conf dev */ 1799 dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc); 1800 return rc; 1801 } 1802 1803 return 0; 1804 } 1805 1806 void idxd_cleanup_sysfs(struct idxd_device *idxd) 1807 { 1808 int i; 1809 1810 for (i = 0; i < idxd->max_wqs; i++) { 1811 struct idxd_wq *wq = &idxd->wqs[i]; 1812 1813 device_unregister(&wq->conf_dev); 1814 } 1815 1816 for (i = 0; i < idxd->max_engines; i++) { 1817 struct idxd_engine *engine = &idxd->engines[i]; 1818 1819 device_unregister(&engine->conf_dev); 1820 } 1821 1822 for (i = 0; i < idxd->max_groups; i++) { 1823 struct idxd_group *group = &idxd->groups[i]; 1824 1825 device_unregister(&group->conf_dev); 1826 } 1827 1828 device_unregister(&idxd->conf_dev); 1829 } 1830 1831 int idxd_register_bus_type(void) 1832 { 1833 int i, rc; 1834 1835 for (i = 0; i < IDXD_TYPE_MAX; i++) { 1836 rc = bus_register(idxd_bus_types[i]); 1837 if (rc < 0) 1838 goto bus_err; 1839 } 1840 1841 return 0; 1842 1843 bus_err: 1844 while (--i >= 0) 1845 bus_unregister(idxd_bus_types[i]); 1846 return rc; 1847 } 1848 1849 void idxd_unregister_bus_type(void) 1850 { 1851 int i; 1852 1853 for (i = 0; i < IDXD_TYPE_MAX; i++) 1854 bus_unregister(idxd_bus_types[i]); 1855 } 1856