// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS(IDXD);

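/*
 * Module parameters: "sva" (default on) toggles Shared Virtual Addressing
 * support, and "tc_override" (default off) allows overriding the default
 * traffic class configuration. Both use mode 0644, so they also appear under
 * /sys/module/idxd/parameters/. A hypothetical load-time example, assuming
 * the driver is built as a module:
 *
 *	modprobe idxd sva=0 tc_override=1
 */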
static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

bool tc_override;
module_param(tc_override, bool, 0644);
MODULE_PARM_DESC(tc_override, "Override traffic class defaults");

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);

static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
	},
};

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *ie;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}
	idxd->irq_cnt = msixcnt;

	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	ie = idxd_get_ie(idxd, 0);
	ie->vector = pci_irq_vector(pdev, 0);
	rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}
	dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);

	for (i = 0; i < idxd->max_wqs; i++) {
		int msix_idx = i + 1;

		ie = idxd_get_ie(idxd, msix_idx);
		ie->id = msix_idx;
		ie->int_handle = INVALID_INT_HANDLE;
		ie->pasid = INVALID_IOASID;

		spin_lock_init(&ie->list_lock);
		init_llist_head(&ie->pending_llist);
		INIT_LIST_HEAD(&ie->work_list);
	}

	idxd_unmask_error_interrupts(idxd);
	return 0;

err_misc_irq:
	idxd_mask_error_interrupts(idxd);
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

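/*
 * Tear down what idxd_setup_interrupts() set up: mask error interrupt
 * generation, free the misc IRQ, and release the MSI-X vectors.
 */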
static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct idxd_irq_entry *ie;
	int msixcnt;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt <= 0)
		return;

	ie = idxd_get_ie(idxd, 0);
	idxd_mask_error_interrupts(idxd);
	free_irq(ie->vector, ie);
	pci_free_irq_vectors(pdev);
}

static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	struct device *conf_dev;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wq_enable_map) {
		kfree(idxd->wqs);
		return -ENOMEM;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
		conf_dev = wq_confdev(wq);
		wq->id = i;
		wq->idxd = idxd;
		device_initialize(wq_confdev(wq));
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_wq_device_type;
		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		init_completion(&wq->wq_resurrect);
		wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
		idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			put_device(conf_dev);
			rc = -ENOMEM;
			goto err;
		}

		if (idxd->hw.wq_cap.op_config) {
			wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
			if (!wq->opcap_bmap) {
				put_device(conf_dev);
				rc = -ENOMEM;
				goto err;
			}
			bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
		}
		idxd->wqs[i] = wq;
	}

	return 0;

err:
	while (--i >= 0) {
		wq = idxd->wqs[i];
		conf_dev = wq_confdev(wq);
		put_device(conf_dev);
	}
	return rc;
}

static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
		conf_dev = engine_confdev(engine);
		engine->id = i;
		engine->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_engine_device_type;
		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

err:
	while (--i >= 0) {
		engine = idxd->engines[i];
		conf_dev = engine_confdev(engine);
		put_device(conf_dev);
	}
	return rc;
}

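/*
 * Allocate the per-group configuration devices. On devices at or below
 * version 2 the driver defaults both traffic classes to 1 unless the
 * tc_override module parameter is set; otherwise they are left at -1 for
 * later configuration.
 */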
static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
		conf_dev = group_confdev(group);
		group->id = i;
		group->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_group_device_type;
		rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->groups[i] = group;
		if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
		/*
		 * The default value is the same as the value of
		 * total read buffers in GRPCAP.
		 */
		group->rdbufs_allowed = idxd->max_rdbufs;
	}

	return 0;

err:
	while (--i >= 0) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}
	return rc;
}

static void idxd_cleanup_internals(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
	destroy_workqueue(idxd->wq);
}

static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	init_waitqueue_head(&idxd->cmd_waitq);

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		goto err_wqs;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	return 0;

err_wkq_create:
	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
err_group:
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
err_wqs:
	return rc;
}

static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

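/*
 * Expand an array of 'count' 64-bit words into a flat bitmap, with bit 0 of
 * val[0] becoming bit 0 of bmap. idxd_read_caps() uses this to turn the four
 * OPCAP registers into idxd->opcap_bmap (IDXD_MAX_OPCAP_BITS wide).
 */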
static void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
{
	int i, j, nr;

	for (i = 0, nr = 0; i < count; i++) {
		for (j = 0; j < BITS_PER_LONG_LONG; j++) {
			if (val[i] & BIT(j))
				set_bit(nr, bmap);
			nr++;
		}
	}
}

static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	/* reading command capabilities */
	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
		idxd->request_int_handles = true;

	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift);
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
	dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
	idxd->nr_rdbufs = idxd->max_rdbufs;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
	multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);
}

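/*
 * Allocate an idxd_device and do the minimum initialization needed before
 * probe: assign an ID from idxd_ida, allocate the operation-capability
 * bitmap, and set up (but do not add) the conf_dev on the dsa bus. The
 * context is later released through the conf_dev's ->release() via
 * put_device().
 */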
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
	struct device *dev = &pdev->dev;
	struct device *conf_dev;
	struct idxd_device *idxd;
	int rc;

	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
	if (!idxd)
		return NULL;

	conf_dev = idxd_confdev(idxd);
	idxd->pdev = pdev;
	idxd->data = data;
	idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
	if (idxd->id < 0)
		return NULL;

	idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->opcap_bmap) {
		ida_free(&idxd_ida, idxd->id);
		return NULL;
	}

	device_initialize(conf_dev);
	conf_dev->parent = dev;
	conf_dev->bus = &dsa_bus_type;
	conf_dev->type = idxd->data->dev_type;
	rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
	if (rc < 0) {
		put_device(conf_dev);
		return NULL;
	}

	spin_lock_init(&idxd->dev_lock);
	spin_lock_init(&idxd->cmd_lock);

	return idxd;
}

static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	return -EOPNOTSUPP;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	iommu_sva_unbind_device(idxd->sva);
	idxd->sva = NULL;
}

static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
			dev_warn(dev, "Unable to turn on user SVA feature.\n");
		} else {
			set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);

			if (idxd_enable_system_pasid(idxd))
				dev_warn(dev, "No in-kernel DMA with PASID.\n");
			else
				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
		}
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are readonly, then load them from device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err_config;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_config;

	idxd->major = idxd_cdev_get_major(idxd);

	rc = perfmon_pmu_init(idxd);
	if (rc < 0)
		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

err_config:
	idxd_cleanup_internals(idxd);
err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	if (device_user_pasid_enabled(idxd))
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return rc;
}

static void idxd_cleanup(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;

	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	if (device_user_pasid_enabled(idxd))
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}

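/*
 * PCI probe: enable the function, map the MMIO BAR (IDXD_MMIO_BAR), set the
 * DMA mask, run the device-level probe, and register the conf_dev hierarchy
 * with sysfs. Errors unwind in reverse order through the labels below.
 */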
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, data);
	if (!idxd) {
		rc = -ENOMEM;
		goto err_idxd_alloc;
	}

	dev_dbg(dev, "Mapping BARs\n");
	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
	if (!idxd->reg_base) {
		rc = -ENOMEM;
		goto err_iomap;
	}

	dev_dbg(dev, "Set DMA masks\n");
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		goto err;

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		goto err;
	}

	rc = idxd_register_devices(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		goto err_dev_register;
	}

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;

err_dev_register:
	idxd_cleanup(idxd);
err:
	pci_iounmap(pdev, idxd->reg_base);
err_iomap:
	put_device(idxd_confdev(idxd));
err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}

void idxd_wqs_quiesce(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
			idxd_wq_quiesce(wq);
	}
}

static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;
	int rc;

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	irq_entry = &idxd->ie;
	synchronize_irq(irq_entry->vector);
	idxd_mask_error_interrupts(idxd);
	flush_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;

	idxd_unregister_devices(idxd);
	/*
	 * When ->release() is called for the idxd->conf_dev, it frees all the memory related
	 * to the idxd context. The driver still needs those bits in order to do the rest of
	 * the cleanup. However, we do need to unbind the idxd sub-driver. So take a ref
	 * on the device here to hold off the freeing while allowing the idxd sub-driver
	 * to unbind.
	 */
	get_device(idxd_confdev(idxd));
	device_unregister(idxd_confdev(idxd));
	idxd_shutdown(pdev);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);

	irq_entry = idxd_get_ie(idxd, 0);
	free_irq(irq_entry->vector, irq_entry);
	pci_free_irq_vectors(pdev);
	pci_iounmap(pdev, idxd->reg_base);
	if (device_user_pasid_enabled(idxd))
		iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
	pci_disable_device(pdev);
	destroy_workqueue(idxd->wq);
	perfmon_pmu_remove(idxd);
	put_device(idxd_confdev(idxd));
}

static struct pci_driver idxd_pci_driver = {
	.name = DRV_NAME,
	.id_table = idxd_pci_tbl,
	.probe = idxd_pci_probe,
	.remove = idxd_remove,
	.shutdown = idxd_shutdown,
};

static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
	 * enumerating the device. We cannot utilize it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	perfmon_init();

	err = idxd_driver_register(&idxd_drv);
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_driver_register(&idxd_dmaengine_drv);
	if (err < 0)
		goto err_idxd_dmaengine_driver_register;

	err = idxd_driver_register(&idxd_user_drv);
	if (err < 0)
		goto err_idxd_user_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_cdev_remove();
err_cdev_register:
	idxd_driver_unregister(&idxd_user_drv);
err_idxd_user_driver_register:
	idxd_driver_unregister(&idxd_dmaengine_drv);
err_idxd_dmaengine_driver_register:
	idxd_driver_unregister(&idxd_drv);
err_idxd_driver_register:
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	idxd_driver_unregister(&idxd_user_drv);
	idxd_driver_unregister(&idxd_dmaengine_drv);
	idxd_driver_unregister(&idxd_drv);
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	perfmon_exit();
}
module_exit(idxd_exit_module);