// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/intel-svm.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS(IDXD);

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

bool tc_override;
module_param(tc_override, bool, 0644);
MODULE_PARM_DESC(tc_override, "Override traffic class defaults");

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);

static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
	},
};

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}
	idxd->irq_cnt = msixcnt;

	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	/*
	 * We implement 1 completion list per MSI-X entry except for
	 * entry 0, which is for errors and others.
	 */
	idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
					 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->irq_entries) {
		rc = -ENOMEM;
		goto err_irq_entries;
	}

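	/*
	 * Resulting vector layout: entry 0 is claimed below by the misc/error
	 * handler and never carries a wq; entry i (i > 0) pairs with wqs[i - 1].
	 * A device enumerating 9 MSI-X vectors, for example, ends up with
	 * vector 0 on "idxd-misc" and vectors 1..8 servicing wqs[0]..wqs[7].
	 */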
	for (i = 0; i < msixcnt; i++) {
		idxd->irq_entries[i].id = i;
		idxd->irq_entries[i].idxd = idxd;
		/*
		 * WQ association starts at irq_entry 1; irq_entry 0 is for
		 * misc interrupts and has no wq association.
		 */
		if (i > 0)
			idxd->irq_entries[i].wq = idxd->wqs[i - 1];
		idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
		idxd->irq_entries[i].int_handle = INVALID_INT_HANDLE;
		if (device_pasid_enabled(idxd) && i > 0)
			idxd->irq_entries[i].pasid = idxd->pasid;
		else
			idxd->irq_entries[i].pasid = INVALID_IOASID;
		spin_lock_init(&idxd->irq_entries[i].list_lock);
	}

	idxd_msix_perm_setup(idxd);

	irq_entry = &idxd->irq_entries[0];
	rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
				  0, "idxd-misc", irq_entry);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}

	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);

	/* first MSI-X entry is not for wq interrupts */
	idxd->num_wq_irqs = msixcnt - 1;

	for (i = 1; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];

		init_llist_head(&idxd->irq_entries[i].pending_llist);
		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
		rc = request_threaded_irq(irq_entry->vector, NULL,
					  idxd_wq_thread, 0, "idxd-portal", irq_entry);
		if (rc < 0) {
			dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
			goto err_wq_irqs;
		}

		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
		if (idxd->request_int_handles) {
			rc = idxd_device_request_int_handle(idxd, i, &irq_entry->int_handle,
							    IDXD_IRQ_MSIX);
			if (rc < 0) {
				free_irq(irq_entry->vector, irq_entry);
				goto err_wq_irqs;
			}
			dev_dbg(dev, "int handle requested: %u\n", irq_entry->int_handle);
		}
	}

	idxd_unmask_error_interrupts(idxd);
	return 0;

 err_wq_irqs:
	while (--i >= 0) {
		irq_entry = &idxd->irq_entries[i];
		free_irq(irq_entry->vector, irq_entry);
		if (irq_entry->int_handle != INVALID_INT_HANDLE) {
			idxd_device_release_int_handle(idxd, irq_entry->int_handle,
						       IDXD_IRQ_MSIX);
			irq_entry->int_handle = INVALID_INT_HANDLE;
			irq_entry->pasid = INVALID_IOASID;
		}
		irq_entry->vector = -1;
		irq_entry->wq = NULL;
		irq_entry->idxd = NULL;
	}
 err_misc_irq:
	/* Disable error interrupt generation */
	idxd_mask_error_interrupts(idxd);
	idxd_msix_perm_clear(idxd);
 err_irq_entries:
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct idxd_irq_entry *irq_entry;
	int i;

	for (i = 0; i < idxd->irq_cnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		if (irq_entry->int_handle != INVALID_INT_HANDLE) {
			idxd_device_release_int_handle(idxd, irq_entry->int_handle,
						       IDXD_IRQ_MSIX);
			irq_entry->int_handle = INVALID_INT_HANDLE;
			irq_entry->pasid = INVALID_IOASID;
		}
		/* free the irq while the entry still holds its valid vector */
		free_irq(irq_entry->vector, irq_entry);
		irq_entry->vector = -1;
		irq_entry->wq = NULL;
		irq_entry->idxd = NULL;
	}

	idxd_mask_error_interrupts(idxd);
	pci_free_irq_vectors(pdev);
}

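/*
 * idxd_setup_wqs(), idxd_setup_engines() and idxd_setup_groups() expose each
 * wq, engine, and group as a child configuration device ("wq%d.%d",
 * "engine%d.%d", "group%d.%d") on the dsa bus. The devices are only
 * initialized here, never added, so the error paths unwind with put_device()
 * and let the ->release() callback free the object rather than calling
 * kfree() directly.
 */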
static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	struct device *conf_dev;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
		conf_dev = wq_confdev(wq);
		wq->id = i;
		wq->idxd = idxd;
		device_initialize(wq_confdev(wq));
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_wq_device_type;
		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		init_completion(&wq->wq_resurrect);
		wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
		wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			put_device(conf_dev);
			rc = -ENOMEM;
			goto err;
		}
		idxd->wqs[i] = wq;
	}

	return 0;

 err:
	while (--i >= 0) {
		wq = idxd->wqs[i];
		conf_dev = wq_confdev(wq);
		put_device(conf_dev);
	}
	return rc;
}

static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
		conf_dev = engine_confdev(engine);
		engine->id = i;
		engine->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_engine_device_type;
		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

 err:
	while (--i >= 0) {
		engine = idxd->engines[i];
		conf_dev = engine_confdev(engine);
		put_device(conf_dev);
	}
	return rc;
}

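/*
 * Group traffic class defaults: pre-2.0 devices get both classes pinned to 1
 * unless the tc_override module parameter is set; -1 leaves the class to be
 * chosen later (e.g. through the group's sysfs attributes) instead of
 * programming a fixed value.
 */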
static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
		conf_dev = group_confdev(group);
		group->id = i;
		group->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_group_device_type;
		rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->groups[i] = group;
		if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
	}

	return 0;

 err:
	while (--i >= 0) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}
	return rc;
}

static void idxd_cleanup_internals(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
	destroy_workqueue(idxd->wq);
}

static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	init_waitqueue_head(&idxd->cmd_waitq);

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		goto err_wqs;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	return 0;

 err_wkq_create:
	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
 err_wqs:
	return rc;
}

static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

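/*
 * Shift-encoded capability fields read below expand as 1 << shift: a
 * max_xfer_shift of 31, for instance, would yield a 2G (0x80000000 byte)
 * maximum transfer size, and a max_batch_shift of 10 a 1024-descriptor
 * batch limit. The actual shift values are device dependent.
 */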
static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	/* reading command capabilities */
	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
		idxd->request_int_handles = true;

	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	idxd->nr_tokens = idxd->max_tokens;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}

static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
	struct device *dev = &pdev->dev;
	struct device *conf_dev;
	struct idxd_device *idxd;
	int rc;

	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
	if (!idxd)
		return NULL;

	conf_dev = idxd_confdev(idxd);
	idxd->pdev = pdev;
	idxd->data = data;
	idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
	if (idxd->id < 0) {
		/* conf_dev is not initialized yet, so free the allocation directly */
		kfree(idxd);
		return NULL;
	}

	device_initialize(conf_dev);
	conf_dev->parent = dev;
	conf_dev->bus = &dsa_bus_type;
	conf_dev->type = idxd->data->dev_type;
	rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
	if (rc < 0) {
		put_device(conf_dev);
		return NULL;
	}

	spin_lock_init(&idxd->dev_lock);
	spin_lock_init(&idxd->cmd_lock);

	return idxd;
}

static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	int flags;
	unsigned int pasid;
	struct iommu_sva *sva;

	flags = SVM_FLAG_SUPERVISOR_MODE;

	sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
	if (IS_ERR(sva)) {
		dev_warn(&idxd->pdev->dev,
			 "iommu sva bind failed: %ld\n", PTR_ERR(sva));
		return PTR_ERR(sva);
	}

	pasid = iommu_sva_get_pasid(sva);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(sva);
		return -ENODEV;
	}

	idxd->sva = sva;
	idxd->pasid = pasid;
	dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
	return 0;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	iommu_sva_unbind_device(idxd->sva);
	idxd->sva = NULL;
}

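/*
 * The system PASID above is bound with SVM_FLAG_SUPERVISOR_MODE, i.e. against
 * kernel address space, so that kernel-owned work queues can operate with
 * PASID-granular translation. User work queues are presumed to bind their own
 * PASIDs later, when a portal is opened through the cdev interface.
 */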
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
		if (rc == 0) {
			rc = idxd_enable_system_pasid(idxd);
			if (rc < 0) {
				iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
				dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
			} else {
				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
			}
		} else {
			dev_warn(dev, "Unable to turn on SVA feature.\n");
		}
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are readonly, then load them from device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err_config;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_config;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	idxd->major = idxd_cdev_get_major(idxd);

	rc = perfmon_pmu_init(idxd);
	if (rc < 0)
		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err_config:
	idxd_cleanup_internals(idxd);
 err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return rc;
}

static void idxd_cleanup(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;

	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}

static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, data);
	if (!idxd) {
		rc = -ENOMEM;
		goto err_idxd_alloc;
	}

	dev_dbg(dev, "Mapping BARs\n");
	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
	if (!idxd->reg_base) {
		rc = -ENOMEM;
		goto err_iomap;
	}

	dev_dbg(dev, "Set DMA masks\n");
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		goto err;

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		goto err;
	}

	rc = idxd_register_devices(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		goto err_dev_register;
	}

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;

 err_dev_register:
	idxd_cleanup(idxd);
 err:
	pci_iounmap(pdev, idxd->reg_base);
 err_iomap:
	put_device(idxd_confdev(idxd));
 err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}

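/*
 * Outstanding descriptors sit on two per-vector lists: the lockless
 * pending_llist that completions land on first, and the work_list they are
 * moved to afterwards. During shutdown both are drained below and every
 * remaining descriptor is completed with IDXD_COMPLETE_ABORT status.
 */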
static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;

	head = llist_del_all(&ie->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, itr, head, llnode)
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
}

static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *iter;

	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
		list_del(&desc->list);
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
	}
}

void idxd_wqs_quiesce(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
			idxd_wq_quiesce(wq);
	}
}

static void idxd_release_int_handles(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 1; i < idxd->irq_cnt; i++) {
		struct idxd_irq_entry *ie = &idxd->irq_entries[i];

		if (ie->int_handle != INVALID_INT_HANDLE) {
			rc = idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
			if (rc < 0)
				dev_warn(dev, "irq handle %d release failed\n", ie->int_handle);
			else
				dev_dbg(dev, "int handle released: %u\n", ie->int_handle);
		}
	}
}

static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		synchronize_irq(irq_entry->vector);
		if (i == 0)
			continue;
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}
	flush_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);
	int i;

	idxd_unregister_devices(idxd);
	/*
	 * When ->release() is called for the idxd->conf_dev, it frees all the memory related
	 * to the idxd context. The driver still needs those bits in order to do the rest of
	 * the cleanup. However, we do need to unbind the idxd sub-driver. So take a ref
	 * on the device here to hold off the freeing while allowing the idxd sub-driver
	 * to unbind.
	 */
	get_device(idxd_confdev(idxd));
	device_unregister(idxd_confdev(idxd));
	idxd_shutdown(pdev);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		free_irq(irq_entry->vector, irq_entry);
	}
	idxd_msix_perm_clear(idxd);
	idxd_release_int_handles(idxd);
	pci_free_irq_vectors(pdev);
	pci_iounmap(pdev, idxd->reg_base);
	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
	pci_disable_device(pdev);
	destroy_workqueue(idxd->wq);
	perfmon_pmu_remove(idxd);
	put_device(idxd_confdev(idxd));
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};

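/*
 * Registration order below matters: the idxd, dmaengine, and user sub-drivers
 * are registered on the dsa bus before the PCI driver, so that devices probed
 * by idxd_pci_probe() can bind to them right away; the error unwind tears
 * everything down in reverse.
 */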
static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
	 * enumerating the device. We cannot utilize it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	perfmon_init();

	err = idxd_driver_register(&idxd_drv);
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_driver_register(&idxd_dmaengine_drv);
	if (err < 0)
		goto err_idxd_dmaengine_driver_register;

	err = idxd_driver_register(&idxd_user_drv);
	if (err < 0)
		goto err_idxd_user_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

 err_pci_register:
	idxd_cdev_remove();
 err_cdev_register:
	idxd_driver_unregister(&idxd_user_drv);
 err_idxd_user_driver_register:
	idxd_driver_unregister(&idxd_dmaengine_drv);
 err_idxd_dmaengine_driver_register:
	idxd_driver_unregister(&idxd_drv);
 err_idxd_driver_register:
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	idxd_driver_unregister(&idxd_user_drv);
	idxd_driver_unregister(&idxd_dmaengine_drv);
	idxd_driver_unregister(&idxd_drv);
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	perfmon_exit();
}
module_exit(idxd_exit_module);