// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout);
static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand);

/* Interrupt control bits */
int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct pci_dev *pdev = idxd->pdev;
	int msixcnt = pci_msix_vec_count(pdev);
	union msix_perm perm;
	u32 offset;

	if (vec_id < 0 || vec_id >= msixcnt)
		return -EINVAL;

	offset = idxd->msix_perm_offset + vec_id * 8;
	perm.bits = ioread32(idxd->reg_base + offset);
	perm.ignore = 1;
	iowrite32(perm.bits, idxd->reg_base + offset);

	return 0;
}

void idxd_mask_msix_vectors(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	int msixcnt = pci_msix_vec_count(pdev);
	int i, rc;

	for (i = 0; i < msixcnt; i++) {
		rc = idxd_mask_msix_vector(idxd, i);
		if (rc < 0)
			dev_warn(&pdev->dev,
				 "Failed disabling msix vec %d\n", i);
	}
}

int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct pci_dev *pdev = idxd->pdev;
	int msixcnt = pci_msix_vec_count(pdev);
	union msix_perm perm;
	u32 offset;

	if (vec_id < 0 || vec_id >= msixcnt)
		return -EINVAL;

	offset = idxd->msix_perm_offset + vec_id * 8;
	perm.bits = ioread32(idxd->reg_base + offset);
	perm.ignore = 0;
	iowrite32(perm.bits, idxd->reg_base + offset);

	/*
	 * A readback from the device ensures that any previously generated
	 * completion record writes are visible to software based on PCI
	 * ordering rules.
	 */
	perm.bits = ioread32(idxd->reg_base + offset);

	return 0;
}

void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}
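/*
 * Descriptor backing-store helpers for kernel-owned WQs. Each WQ keeps two
 * parallel per-descriptor arrays: the software idxd_desc wrappers and the
 * hardware dsa_hw_desc blocks they point at, both allocated on the
 * device's NUMA node. Completion records are carved out of one coherent
 * DMA buffer in idxd_wq_alloc_resources() further down.
 */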
static void free_hw_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);

	kfree(wq->hw_descs);
}

static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, node);
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, node);
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

static void free_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);

	kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, node);
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, node);
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

/* WQ control bits */
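/*
 * Set up everything a kernel WQ needs before descriptors can be issued:
 * hardware descriptors, software descriptor wrappers, one coherent buffer
 * of completion records and a sbitmap that hands out descriptor slots.
 * The descriptor count is the WQ size plus max_descs_per_engine for every
 * engine in the group, presumably so in-flight engine work is covered on
 * top of the queue depth.
 */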
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_group *group = wq->group;
	struct device *dev = &idxd->pdev->dev;
	int rc, num_descs, i;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	num_descs = wq->size +
		idxd->hw.gen_cap.max_descs_per_engine * group->num_engines;
	wq->num_descs = num_descs;

	rc = alloc_hw_descs(wq, num_descs);
	if (rc < 0)
		return rc;

	wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
	wq->compls = dma_alloc_coherent(dev, wq->compls_size,
					&wq->compls_addr, GFP_KERNEL);
	if (!wq->compls) {
		rc = -ENOMEM;
		goto fail_alloc_compls;
	}

	rc = alloc_descs(wq, num_descs);
	if (rc < 0)
		goto fail_alloc_descs;

	rc = sbitmap_init_node(&wq->sbmap, num_descs, -1, GFP_KERNEL,
			       dev_to_node(dev));
	if (rc < 0)
		goto fail_sbitmap_init;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		desc->completion = &wq->compls[i];
		desc->compl_dma = wq->compls_addr +
			sizeof(struct dsa_completion_record) * i;
		desc->id = i;
		desc->wq = wq;

		dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
		desc->txd.tx_submit = idxd_dma_tx_submit;
	}

	return 0;

 fail_sbitmap_init:
	free_descs(wq);
 fail_alloc_descs:
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
	free_hw_descs(wq);
	return rc;
}

void idxd_wq_free_resources(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_hw_descs(wq);
	free_descs(wq);
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
	sbitmap_free(&wq->sbmap);
}

int idxd_wq_enable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status;
	int rc;

	lockdep_assert_held(&idxd->dev_lock);

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return -ENXIO;
	}

	rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id);
	if (rc < 0)
		return rc;
	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
	if (rc < 0)
		return rc;

	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_ENABLED;
	dev_dbg(dev, "WQ %d enabled\n", wq->id);
	return 0;
}

int idxd_wq_disable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status, operand;
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand);
	if (rc < 0)
		return rc;
	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
	if (rc < 0)
		return rc;

	if (status != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "WQ disable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);
	return 0;
}

int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	resource_size_t start;

	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start = start + wq->id * IDXD_PORTAL_SIZE;

	wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->dportal)
		return -ENOMEM;
	dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);

	return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->dportal);
}

void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i, wq_offset;

	lockdep_assert_held(&idxd->dev_lock);
	memset(&wq->wqcfg, 0, sizeof(wq->wqcfg));
	wq->type = IDXD_WQT_NONE;
	wq->size = 0;
	wq->group = NULL;
	wq->threshold = 0;
	wq->priority = 0;
	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	memset(wq->name, 0, WQ_NAME_SIZE);

	for (i = 0; i < 8; i++) {
		wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
		iowrite32(0, idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}
}

/* Device control bits */
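/*
 * Device commands go through a single command/status register pair:
 * idxd_cmd_send() writes the command code and operand to IDXD_CMD_OFFSET,
 * and idxd_cmd_wait() polls IDXD_CMDSTS_OFFSET until the ACTIVE bit clears
 * or the timeout expires. Callers hold dev_lock so only one command is in
 * flight at a time.
 */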
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
		return true;
	return false;
}

static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout)
{
	u32 sts, to = timeout;

	lockdep_assert_held(&idxd->dev_lock);
	sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	while (sts & IDXD_CMDSTS_ACTIVE && --to) {
		cpu_relax();
		sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	}

	if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) {
		dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__);
		*status = 0;
		return -EBUSY;
	}

	*status = sts;
	return 0;
}

static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand)
{
	union idxd_command_reg cmd;
	int rc;
	u32 status;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
	if (rc < 0)
		return rc;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;
	cmd.operand = operand;
	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	return 0;
}

int idxd_device_enable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	u32 status;

	lockdep_assert_held(&idxd->dev_lock);
	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_DEVICE, 0);
	if (rc < 0)
		return rc;
	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
	if (rc < 0)
		return rc;

	/* Success, or the device was already enabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_ENABLED;
	return 0;
}

int idxd_device_disable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	u32 status;

	lockdep_assert_held(&idxd->dev_lock);
	if (!idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device is not enabled\n");
		return 0;
	}

	rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_DEVICE, 0);
	if (rc < 0)
		return rc;
	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
	if (rc < 0)
		return rc;

	/* Success, or the device was already disabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		rc = -ENXIO;
		return rc;
	}

	idxd->state = IDXD_DEV_CONF_READY;
	return 0;
}

int __idxd_device_reset(struct idxd_device *idxd)
{
	u32 status;
	int rc;

	rc = idxd_cmd_send(idxd, IDXD_CMD_RESET_DEVICE, 0);
	if (rc < 0)
		return rc;
	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
	if (rc < 0)
		return rc;

	return 0;
}

int idxd_device_reset(struct idxd_device *idxd)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = __idxd_device_reset(idxd);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	return rc;
}

/* Device configuration bits */
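/*
 * Each group owns a 64-byte GRPCFG block starting at grpcfg_offset: four
 * 64-bit WQ bitmap words at offset 0, the engine bitmap at offset 32 and
 * the flags word at offset 40, matching the writes below. The readbacks
 * after each write are only for the debug output.
 */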
static void idxd_group_config_write(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i;
	u32 grpcfg_offset;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

	/* setup GRPWQCFG */
	for (i = 0; i < 4; i++) {
		grpcfg_offset = idxd->grpcfg_offset +
			group->id * 64 + i * sizeof(u64);
		iowrite64(group->grpcfg.wqs[i],
			  idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* setup GRPENGCFG */
	grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* setup GRPFLAGS */
	grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
	iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset,
		ioread32(idxd->reg_base + grpcfg_offset));
}

static int idxd_groups_config_write(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i;
	struct device *dev = &idxd->pdev->dev;

	/* Setup bandwidth token limit */
	if (idxd->token_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.token_limit = idxd->token_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		idxd_group_config_write(group);
	}

	return 0;
}

static int idxd_wq_config_write(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i;

	if (!wq->group)
		return 0;

	memset(&wq->wqcfg, 0, sizeof(union wqcfg));

	/* bytes 0-3 */
	wq->wqcfg.wq_size = wq->size;

	if (wq->size == 0) {
		dev_warn(dev, "Incorrect work queue size: 0\n");
		return -EINVAL;
	}

	/* bytes 4-7 */
	wq->wqcfg.wq_thresh = wq->threshold;

	/* bytes 8-11 */
	wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL);
	wq->wqcfg.mode = 1;

	wq->wqcfg.priority = wq->priority;

	/* bytes 12-15 */
	wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
	wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift;

	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < 8; i++) {
		wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
		iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}

	return 0;
}

static int idxd_wqs_config_write(struct idxd_device *idxd)
{
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		rc = idxd_wq_config_write(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void idxd_group_flags_setup(struct idxd_device *idxd)
{
	int i;

	/* TC-A 0 and TC-B 1 should be defaults */
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_token_limit = group->use_token_limit;
		group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
		if (group->tokens_allowed)
			group->grpcfg.flags.tokens_allowed =
				group->tokens_allowed;
		else
			group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
	}
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
	int i, engines = 0;
	struct idxd_engine *eng;
	struct idxd_group *group;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = &idxd->engines[i];
		group = eng->group;

		if (!group)
			continue;

		group->grpcfg.engines |= BIT(eng->id);
		engines++;
	}

	if (!engines)
		return -EINVAL;

	return 0;
}

static int idxd_wqs_setup(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct idxd_group *group;
	int i, j, configured = 0;
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = &idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;
		if (!wq->size)
			continue;

		if (!wq_dedicated(wq)) {
			dev_warn(dev, "No shared workqueue support.\n");
			return -EINVAL;
		}

		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
		configured++;
	}

	if (configured == 0)
		return -EINVAL;

	return 0;
}
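/*
 * Push the software view of the configuration to the device: compute WQ
 * and engine group membership and group flags first, then write the
 * per-WQ and per-group registers. Must be called with dev_lock held.
 */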
int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;

	idxd_group_flags_setup(idxd);

	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_groups_config_write(idxd);
	if (rc < 0)
		return rc;

	return 0;
}