// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/intel-svm.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS(IDXD);

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

bool tc_override;
module_param(tc_override, bool, 0644);
MODULE_PARM_DESC(tc_override, "Override traffic class defaults");

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);

static struct idxd_driver_data idxd_driver_data[] = {
        [IDXD_TYPE_DSA] = {
                .name_prefix = "dsa",
                .type = IDXD_TYPE_DSA,
                .compl_size = sizeof(struct dsa_completion_record),
                .align = 32,
                .dev_type = &dsa_device_type,
        },
        [IDXD_TYPE_IAX] = {
                .name_prefix = "iax",
                .type = IDXD_TYPE_IAX,
                .compl_size = sizeof(struct iax_completion_record),
                .align = 64,
                .dev_type = &iax_device_type,
        },
};

static struct pci_device_id idxd_pci_tbl[] = {
        /* DSA ver 1.0 platforms */
        { PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },

        /* IAX ver 1.0 platforms */
        { PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static int idxd_setup_interrupts(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        struct idxd_irq_entry *ie;
        int i, msixcnt;
        int rc = 0;

        msixcnt = pci_msix_vec_count(pdev);
        if (msixcnt < 0) {
                dev_err(dev, "Not MSI-X interrupt capable.\n");
                return -ENOSPC;
        }
        idxd->irq_cnt = msixcnt;

        rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
        if (rc != msixcnt) {
                dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
                return -ENOSPC;
        }
        dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

        ie = idxd_get_ie(idxd, 0);
        ie->vector = pci_irq_vector(pdev, 0);
        rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
        if (rc < 0) {
                dev_err(dev, "Failed to allocate misc interrupt.\n");
                goto err_misc_irq;
        }
        dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);

        for (i = 0; i < idxd->max_wqs; i++) {
                int msix_idx = i + 1;

                ie = idxd_get_ie(idxd, msix_idx);
                ie->id = msix_idx;
                ie->int_handle = INVALID_INT_HANDLE;
                ie->pasid = INVALID_IOASID;

                spin_lock_init(&ie->list_lock);
                init_llist_head(&ie->pending_llist);
                INIT_LIST_HEAD(&ie->work_list);
        }

        idxd_unmask_error_interrupts(idxd);
        return 0;

 err_misc_irq:
        idxd_mask_error_interrupts(idxd);
        pci_free_irq_vectors(pdev);
        dev_err(dev, "No usable interrupts\n");
        return rc;
}
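
/*
 * Undo idxd_setup_interrupts(): mask the error interrupts, free the
 * misc IRQ requested on vector 0 and release the MSI-X vectors.
 */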
static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;
        struct idxd_irq_entry *ie;
        int msixcnt;

        msixcnt = pci_msix_vec_count(pdev);
        if (msixcnt <= 0)
                return;

        ie = idxd_get_ie(idxd, 0);
        idxd_mask_error_interrupts(idxd);
        free_irq(ie->vector, ie);
        pci_free_irq_vectors(pdev);
}

static int idxd_setup_wqs(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        struct idxd_wq *wq;
        struct device *conf_dev;
        int i, rc;

        idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
                                 GFP_KERNEL, dev_to_node(dev));
        if (!idxd->wqs)
                return -ENOMEM;

        for (i = 0; i < idxd->max_wqs; i++) {
                wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
                if (!wq) {
                        rc = -ENOMEM;
                        goto err;
                }

                idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
                conf_dev = wq_confdev(wq);
                wq->id = i;
                wq->idxd = idxd;
                device_initialize(wq_confdev(wq));
                conf_dev->parent = idxd_confdev(idxd);
                conf_dev->bus = &dsa_bus_type;
                conf_dev->type = &idxd_wq_device_type;
                rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
                if (rc < 0) {
                        put_device(conf_dev);
                        goto err;
                }

                mutex_init(&wq->wq_lock);
                init_waitqueue_head(&wq->err_queue);
                init_completion(&wq->wq_dead);
                init_completion(&wq->wq_resurrect);
                wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
                wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
                wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
                wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
                if (!wq->wqcfg) {
                        put_device(conf_dev);
                        rc = -ENOMEM;
                        goto err;
                }
                idxd->wqs[i] = wq;
        }

        return 0;

 err:
        while (--i >= 0) {
                wq = idxd->wqs[i];
                conf_dev = wq_confdev(wq);
                put_device(conf_dev);
        }
        return rc;
}

static int idxd_setup_engines(struct idxd_device *idxd)
{
        struct idxd_engine *engine;
        struct device *dev = &idxd->pdev->dev;
        struct device *conf_dev;
        int i, rc;

        idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
                                     GFP_KERNEL, dev_to_node(dev));
        if (!idxd->engines)
                return -ENOMEM;

        for (i = 0; i < idxd->max_engines; i++) {
                engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
                if (!engine) {
                        rc = -ENOMEM;
                        goto err;
                }

                idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
                conf_dev = engine_confdev(engine);
                engine->id = i;
                engine->idxd = idxd;
                device_initialize(conf_dev);
                conf_dev->parent = idxd_confdev(idxd);
                conf_dev->bus = &dsa_bus_type;
                conf_dev->type = &idxd_engine_device_type;
                rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
                if (rc < 0) {
                        put_device(conf_dev);
                        goto err;
                }

                idxd->engines[i] = engine;
        }

        return 0;

 err:
        while (--i >= 0) {
                engine = idxd->engines[i];
                conf_dev = engine_confdev(engine);
                put_device(conf_dev);
        }
        return rc;
}
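
/*
 * Allocate one configuration device per hardware group. On devices older
 * than version 2, traffic classes default to 1 unless the tc_override
 * module parameter is set; otherwise they start out unconfigured (-1).
 */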
static int idxd_setup_groups(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        struct device *conf_dev;
        struct idxd_group *group;
        int i, rc;

        idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
                                    GFP_KERNEL, dev_to_node(dev));
        if (!idxd->groups)
                return -ENOMEM;

        for (i = 0; i < idxd->max_groups; i++) {
                group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
                if (!group) {
                        rc = -ENOMEM;
                        goto err;
                }

                idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
                conf_dev = group_confdev(group);
                group->id = i;
                group->idxd = idxd;
                device_initialize(conf_dev);
                conf_dev->parent = idxd_confdev(idxd);
                conf_dev->bus = &dsa_bus_type;
                conf_dev->type = &idxd_group_device_type;
                rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
                if (rc < 0) {
                        put_device(conf_dev);
                        goto err;
                }

                idxd->groups[i] = group;
                if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
                        group->tc_a = 1;
                        group->tc_b = 1;
                } else {
                        group->tc_a = -1;
                        group->tc_b = -1;
                }
        }

        return 0;

 err:
        while (--i >= 0) {
                group = idxd->groups[i];
                put_device(group_confdev(group));
        }
        return rc;
}

static void idxd_cleanup_internals(struct idxd_device *idxd)
{
        int i;

        for (i = 0; i < idxd->max_groups; i++)
                put_device(group_confdev(idxd->groups[i]));
        for (i = 0; i < idxd->max_engines; i++)
                put_device(engine_confdev(idxd->engines[i]));
        for (i = 0; i < idxd->max_wqs; i++)
                put_device(wq_confdev(idxd->wqs[i]));
        destroy_workqueue(idxd->wq);
}

static int idxd_setup_internals(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        int rc, i;

        init_waitqueue_head(&idxd->cmd_waitq);

        rc = idxd_setup_wqs(idxd);
        if (rc < 0)
                goto err_wqs;

        rc = idxd_setup_engines(idxd);
        if (rc < 0)
                goto err_engine;

        rc = idxd_setup_groups(idxd);
        if (rc < 0)
                goto err_group;

        idxd->wq = create_workqueue(dev_name(dev));
        if (!idxd->wq) {
                rc = -ENOMEM;
                goto err_wkq_create;
        }

        return 0;

 err_wkq_create:
        for (i = 0; i < idxd->max_groups; i++)
                put_device(group_confdev(idxd->groups[i]));
 err_group:
        for (i = 0; i < idxd->max_engines; i++)
                put_device(engine_confdev(idxd->engines[i]));
 err_engine:
        for (i = 0; i < idxd->max_wqs; i++)
                put_device(wq_confdev(idxd->wqs[i]));
 err_wqs:
        return rc;
}

static void idxd_read_table_offsets(struct idxd_device *idxd)
{
        union offsets_reg offsets;
        struct device *dev = &idxd->pdev->dev;

        offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
        offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
        idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
        dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
        idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
        dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
        idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
        dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
        idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
        dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}
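
/*
 * Cache the capability registers (GENCAP, CMDCAP, GRPCAP, ENGCAP, WQCAP
 * and OPCAP) so the limits they describe can be used without further
 * MMIO reads.
 */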
static void idxd_read_caps(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        int i;

        /* reading generic capabilities */
        idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
        dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

        if (idxd->hw.gen_cap.cmd_cap) {
                idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
                dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
        }

        /* reading command capabilities */
        if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
                idxd->request_int_handles = true;

        idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
        dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
        idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
        dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
        if (idxd->hw.gen_cap.config_en)
                set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

        /* reading group capabilities */
        idxd->hw.group_cap.bits =
                ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
        dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
        idxd->max_groups = idxd->hw.group_cap.num_groups;
        dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
        idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
        dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
        idxd->nr_rdbufs = idxd->max_rdbufs;

        /* read engine capabilities */
        idxd->hw.engine_cap.bits =
                ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
        dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
        idxd->max_engines = idxd->hw.engine_cap.num_engines;
        dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

        /* read workqueue capabilities */
        idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
        dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
        idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
        dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
        idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
        dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
        idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
        dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

        /* reading operation capabilities */
        for (i = 0; i < 4; i++) {
                idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
                                IDXD_OPCAP_OFFSET + i * sizeof(u64));
                dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
        }
}

static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
        struct device *dev = &pdev->dev;
        struct device *conf_dev;
        struct idxd_device *idxd;
        int rc;

        idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
        if (!idxd)
                return NULL;

        conf_dev = idxd_confdev(idxd);
        idxd->pdev = pdev;
        idxd->data = data;
        idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
        idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
        if (idxd->id < 0)
                return NULL;

        device_initialize(conf_dev);
        conf_dev->parent = dev;
        conf_dev->bus = &dsa_bus_type;
        conf_dev->type = idxd->data->dev_type;
        rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
        if (rc < 0) {
                put_device(conf_dev);
                return NULL;
        }

        spin_lock_init(&idxd->dev_lock);
        spin_lock_init(&idxd->cmd_lock);

        return idxd;
}

static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
        int flags;
        unsigned int pasid;
        struct iommu_sva *sva;

        flags = SVM_FLAG_SUPERVISOR_MODE;

        sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
        if (IS_ERR(sva)) {
                dev_warn(&idxd->pdev->dev,
                         "iommu sva bind failed: %ld\n", PTR_ERR(sva));
                return PTR_ERR(sva);
        }

        pasid = iommu_sva_get_pasid(sva);
        if (pasid == IOMMU_PASID_INVALID) {
                iommu_sva_unbind_device(sva);
                return -ENODEV;
        }

        idxd->sva = sva;
        idxd->pasid = pasid;
        dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
        return 0;
}
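
/* Release the supervisor-mode SVA binding set up by idxd_enable_system_pasid(). */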
static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
        iommu_sva_unbind_device(idxd->sva);
        idxd->sva = NULL;
}

static int idxd_probe(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        int rc;

        dev_dbg(dev, "%s entered and resetting device\n", __func__);
        rc = idxd_device_init_reset(idxd);
        if (rc < 0)
                return rc;

        dev_dbg(dev, "IDXD reset complete\n");

        if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
                rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
                if (rc == 0) {
                        rc = idxd_enable_system_pasid(idxd);
                        if (rc < 0) {
                                iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
                                dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
                        } else {
                                set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
                        }
                } else {
                        dev_warn(dev, "Unable to turn on SVA feature.\n");
                }
        } else if (!sva) {
                dev_warn(dev, "User forced SVA off via module param.\n");
        }

        idxd_read_caps(idxd);
        idxd_read_table_offsets(idxd);

        rc = idxd_setup_internals(idxd);
        if (rc)
                goto err;

        /* If the configs are readonly, then load them from device */
        if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
                dev_dbg(dev, "Loading RO device config\n");
                rc = idxd_device_load_config(idxd);
                if (rc < 0)
                        goto err_config;
        }

        rc = idxd_setup_interrupts(idxd);
        if (rc)
                goto err_config;

        idxd->major = idxd_cdev_get_major(idxd);

        rc = perfmon_pmu_init(idxd);
        if (rc < 0)
                dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

        dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
        return 0;

 err_config:
        idxd_cleanup_internals(idxd);
 err:
        if (device_pasid_enabled(idxd))
                idxd_disable_system_pasid(idxd);
        iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
        return rc;
}

static void idxd_cleanup(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;

        perfmon_pmu_remove(idxd);
        idxd_cleanup_interrupts(idxd);
        idxd_cleanup_internals(idxd);
        if (device_pasid_enabled(idxd))
                idxd_disable_system_pasid(idxd);
        iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}

static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct device *dev = &pdev->dev;
        struct idxd_device *idxd;
        struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
        int rc;

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;

        dev_dbg(dev, "Alloc IDXD context\n");
        idxd = idxd_alloc(pdev, data);
        if (!idxd) {
                rc = -ENOMEM;
                goto err_idxd_alloc;
        }

        dev_dbg(dev, "Mapping BARs\n");
        idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
        if (!idxd->reg_base) {
                rc = -ENOMEM;
                goto err_iomap;
        }

        dev_dbg(dev, "Set DMA masks\n");
        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (rc)
                goto err;

        dev_dbg(dev, "Set PCI master\n");
        pci_set_master(pdev);
        pci_set_drvdata(pdev, idxd);

        idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
        rc = idxd_probe(idxd);
        if (rc) {
                dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
                goto err;
        }

        rc = idxd_register_devices(idxd);
        if (rc) {
                dev_err(dev, "IDXD sysfs setup failed\n");
                goto err_dev_register;
        }

        dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
                 idxd->hw.version);

        return 0;

 err_dev_register:
        idxd_cleanup(idxd);
 err:
        pci_iounmap(pdev, idxd->reg_base);
 err_iomap:
        put_device(idxd_confdev(idxd));
 err_idxd_alloc:
        pci_disable_device(pdev);
        return rc;
}
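
/* Quiesce all wqs that are enabled and owned by the kernel (IDXD_WQT_KERNEL). */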
void idxd_wqs_quiesce(struct idxd_device *idxd)
{
        struct idxd_wq *wq;
        int i;

        for (i = 0; i < idxd->max_wqs; i++) {
                wq = idxd->wqs[i];
                if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
                        idxd_wq_quiesce(wq);
        }
}

static void idxd_shutdown(struct pci_dev *pdev)
{
        struct idxd_device *idxd = pci_get_drvdata(pdev);
        struct idxd_irq_entry *irq_entry;
        int rc;

        rc = idxd_device_disable(idxd);
        if (rc)
                dev_err(&pdev->dev, "Disabling device failed\n");

        irq_entry = &idxd->ie;
        synchronize_irq(irq_entry->vector);
        idxd_mask_error_interrupts(idxd);
        flush_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
        struct idxd_device *idxd = pci_get_drvdata(pdev);
        struct idxd_irq_entry *irq_entry;

        idxd_unregister_devices(idxd);
        /*
         * When ->release() is called for the idxd->conf_dev, it frees all the memory related
         * to the idxd context. The driver still needs those bits in order to do the rest of
         * the cleanup. However, we do need to unbind the idxd sub-driver. So take a ref
         * on the device here to hold off the freeing while allowing the idxd sub-driver
         * to unbind.
         */
        get_device(idxd_confdev(idxd));
        device_unregister(idxd_confdev(idxd));
        idxd_shutdown(pdev);
        if (device_pasid_enabled(idxd))
                idxd_disable_system_pasid(idxd);

        irq_entry = idxd_get_ie(idxd, 0);
        free_irq(irq_entry->vector, irq_entry);
        pci_free_irq_vectors(pdev);
        pci_iounmap(pdev, idxd->reg_base);
        iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
        pci_disable_device(pdev);
        destroy_workqueue(idxd->wq);
        perfmon_pmu_remove(idxd);
        put_device(idxd_confdev(idxd));
}

static struct pci_driver idxd_pci_driver = {
        .name = DRV_NAME,
        .id_table = idxd_pci_tbl,
        .probe = idxd_pci_probe,
        .remove = idxd_remove,
        .shutdown = idxd_shutdown,
};
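
/*
 * Module init registers the device sub-drivers (idxd, dmaengine, user)
 * and the idxd cdev region before the PCI driver, and unwinds in the
 * reverse order on failure.
 */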
static int __init idxd_init_module(void)
{
        int err;

        /*
         * If the CPU does not support MOVDIR64B, there's no point in
         * enumerating the device: we cannot submit descriptors to it.
         * ENQCMD(S) is only needed for shared work queue support and is
         * checked separately below.
         */
        if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
                pr_warn("idxd driver failed to load without MOVDIR64B.\n");
                return -ENODEV;
        }

        if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
                pr_warn("Platform does not have ENQCMD(S) support.\n");
        else
                support_enqcmd = true;

        perfmon_init();

        err = idxd_driver_register(&idxd_drv);
        if (err < 0)
                goto err_idxd_driver_register;

        err = idxd_driver_register(&idxd_dmaengine_drv);
        if (err < 0)
                goto err_idxd_dmaengine_driver_register;

        err = idxd_driver_register(&idxd_user_drv);
        if (err < 0)
                goto err_idxd_user_driver_register;

        err = idxd_cdev_register();
        if (err)
                goto err_cdev_register;

        err = pci_register_driver(&idxd_pci_driver);
        if (err)
                goto err_pci_register;

        return 0;

 err_pci_register:
        idxd_cdev_remove();
 err_cdev_register:
        idxd_driver_unregister(&idxd_user_drv);
 err_idxd_user_driver_register:
        idxd_driver_unregister(&idxd_dmaengine_drv);
 err_idxd_dmaengine_driver_register:
        idxd_driver_unregister(&idxd_drv);
 err_idxd_driver_register:
        return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
        idxd_driver_unregister(&idxd_user_drv);
        idxd_driver_unregister(&idxd_dmaengine_drv);
        idxd_driver_unregister(&idxd_drv);
        pci_unregister_driver(&idxd_pci_driver);
        idxd_cdev_remove();
        perfmon_exit();
}
module_exit(idxd_exit_module);