// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	if (engine->group)
		return sysfs_emit(buf, "%d\n", engine->group->id);
	else
		return sysfs_emit(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine = confdev_to_engine(dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	kfree(engine);
}

struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};

/* Group attributes */

static void idxd_set_free_rdbufs(struct idxd_device *idxd)
{
	int i, rdbufs;

	for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		rdbufs += g->rdbufs_reserved;
	}

	idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs;
}

static ssize_t group_read_buffers_reserved_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_reserved);
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
	return group_read_buffers_reserved_show(dev, attr, buf);
}
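/*
 * Read buffers were previously exposed in sysfs as "tokens"; the *_tokens_*
 * attributes below are kept as deprecated aliases. A group's reserved read
 * buffers come out of the device-wide pool, so every update re-runs
 * idxd_set_free_rdbufs() to recompute the free (non-reserved) count.
 */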
static ssize_t group_read_buffers_reserved_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_rdbufs)
		return -EINVAL;

	if (val > idxd->nr_rdbufs + group->rdbufs_reserved)
		return -EINVAL;

	group->rdbufs_reserved = val;
	idxd_set_free_rdbufs(idxd);
	return count;
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
	return group_read_buffers_reserved_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static struct device_attribute dev_attr_group_read_buffers_reserved =
		__ATTR(read_buffers_reserved, 0644, group_read_buffers_reserved_show,
		       group_read_buffers_reserved_store);

static ssize_t group_read_buffers_allowed_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_allowed);
}

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
	return group_read_buffers_allowed_show(dev, attr, buf);
}

static ssize_t group_read_buffers_allowed_store(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->rdbufs_reserved + idxd->nr_rdbufs)
		return -EINVAL;

	group->rdbufs_allowed = val;
	return count;
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
	return group_read_buffers_allowed_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static struct device_attribute dev_attr_group_read_buffers_allowed =
		__ATTR(read_buffers_allowed, 0644, group_read_buffers_allowed_show,
		       group_read_buffers_allowed_store);
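/*
 * use_read_buffer_limit selects whether this group honors the device-wide
 * read_buffer_limit. It can only be turned on once a non-zero limit has
 * been programmed at the device level (idxd->rdbuf_limit).
 */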
static ssize_t group_use_read_buffer_limit_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit);
}

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
	return group_use_read_buffer_limit_show(dev, attr, buf);
}

static ssize_t group_use_read_buffer_limit_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->rdbuf_limit == 0)
		return -EPERM;

	group->use_rdbuf_limit = !!val;
	return count;
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
	return group_use_read_buffer_limit_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static struct device_attribute dev_attr_group_use_read_buffer_limit =
		__ATTR(use_read_buffer_limit, 0644, group_use_read_buffer_limit_show,
		       group_use_read_buffer_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_a);
}
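/*
 * Traffic classes A and B accept values 0-7. On hardware prior to
 * DEVICE_VERSION_2 they can only be changed when the tc_override module
 * parameter is set.
 */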
static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_use_read_buffer_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_read_buffers_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_read_buffers_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = confdev_to_group(dev);

	kfree(group);
}

struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	}

	return sysfs_emit(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);
static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->group)
		return sysfs_emit(buf, "%u\n", wq->group->id);
	else
		return sysfs_emit(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->size);
}
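/*
 * WQ entries are carved out of a single pool: the sizes of all work queues
 * combined may not exceed the device's max_wq_size, which is what
 * total_claimed_wq_size() below is used to enforce on every size update.
 */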
static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!idxd->hw.gen_cap.block_on_fault)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val <= 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
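/*
 * A "kernel" typed WQ is used by the in-kernel dmaengine client, while a
 * "user" typed WQ is exposed to user space through the idxd char device.
 * "none" leaves the WQ unassigned.
 */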
static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
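/*
 * Helper shared by max_transfer_size and max_batch_size: parses a non-zero
 * u64 from sysfs input and rounds it up to the next power of two.
 */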
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->ats_dis);
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	wq->ats_dis = ats_dis;

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u32 occup, offset;

	if (!idxd->hw.wq_cap.occupancy)
		return -EOPNOTSUPP;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
	occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

	return sysfs_emit(buf, "%u\n", occup);
}

static struct device_attribute dev_attr_wq_occupancy =
		__ATTR(occupancy, 0444, wq_occupancy_show, NULL);
static ssize_t wq_enqcmds_retries_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%u\n", wq->enqcmds_retries);
}

static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int rc;
	unsigned int retries;

	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	rc = kstrtouint(buf, 10, &retries);
	if (rc < 0)
		return rc;

	if (retries > IDXD_ENQCMDS_MAX_RETRIES)
		retries = IDXD_ENQCMDS_MAX_RETRIES;

	wq->enqcmds_retries = retries;
	return count;
}

static struct device_attribute dev_attr_wq_enqcmds_retries =
		__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	&dev_attr_wq_occupancy.attr,
	&dev_attr_wq_enqcmds_retries.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	kfree(wq->wqcfg);
	kfree(wq);
}

struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};

/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);
static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, rc = 0;

	for (i = 0; i < 4; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");
	return rc;
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int count = 0, i;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock(&idxd->dev_lock);

	return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sysfs_emit(buf, "halted\n");
	}

	return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, out = 0;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < 4; i++)
		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock(&idxd->dev_lock);
	out--;
	out += sysfs_emit_at(buf, out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_read_buffers_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_rdbufs);
}

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see max_read_buffers.\n");
	return max_read_buffers_show(dev, attr, buf);
}

static DEVICE_ATTR_RO(max_tokens);	/* deprecated */
static DEVICE_ATTR_RO(max_read_buffers);

static ssize_t read_buffer_limit_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit);
}

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
	return read_buffer_limit_show(dev, attr, buf);
}
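/*
 * The device-wide read buffer limit can only be set while the device is
 * disabled and configurable, only when the hardware advertises the
 * rdbuf_limit capability, and the value may not exceed total_rdbufs.
 */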
static ssize_t read_buffer_limit_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.rdbuf_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_rdbufs)
		return -EINVAL;

	idxd->rdbuf_limit = val;
	return count;
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit\n");
	return read_buffer_limit_store(dev, attr, buf, count);
}

static DEVICE_ATTR_RW(token_limit);	/* deprecated */
static DEVICE_ATTR_RW(read_buffer_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}

static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	idxd->cmd_status = 0;
	return count;
}
static DEVICE_ATTR_RW(cmd_status);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_max_read_buffers.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_read_buffer_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	kfree(idxd->groups);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	ida_free(&idxd_ida, idxd->id);
	kfree(idxd);
}

struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};
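/*
 * Registration helpers. When device_add() fails partway, the entry that
 * failed and any entries not yet added only need a put_device(), while the
 * entries already added are torn down with device_unregister().
 */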
static int idxd_register_engine_devices(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i, j, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		rc = device_add(engine_confdev(engine));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		put_device(engine_confdev(engine));
	}

	while (j--) {
		engine = idxd->engines[j];
		device_unregister(engine_confdev(engine));
	}
	return rc;
}

static int idxd_register_group_devices(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i, j, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		rc = device_add(group_confdev(group));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}

	while (j--) {
		group = idxd->groups[j];
		device_unregister(group_confdev(group));
	}
	return rc;
}

static int idxd_register_wq_devices(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i, rc, j;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		rc = device_add(wq_confdev(wq));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		put_device(wq_confdev(wq));
	}

	while (j--) {
		wq = idxd->wqs[j];
		device_unregister(wq_confdev(wq));
	}
	return rc;
}

int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(idxd_confdev(idxd));
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(engine_confdev(idxd->engines[i]));
err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(wq_confdev(idxd->wqs[i]));
err_wq:
	device_del(idxd_confdev(idxd));
	return rc;
}

void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(wq_confdev(wq));
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(engine_confdev(engine));
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(group_confdev(group));
	}
}

int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}