// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/intel-svm.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS(IDXD);

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

bool tc_override;
module_param(tc_override, bool, 0644);
MODULE_PARM_DESC(tc_override, "Override traffic class defaults");

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);

static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
	},
};

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}
	idxd->irq_cnt = msixcnt;

	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	/*
	 * We implement 1 completion list per MSI-X entry except for
	 * entry 0, which is for errors and others.
	 */
	idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
					 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->irq_entries) {
		rc = -ENOMEM;
		goto err_irq_entries;
	}

	for (i = 0; i < msixcnt; i++) {
		idxd->irq_entries[i].id = i;
		idxd->irq_entries[i].idxd = idxd;
		/*
		 * Association of WQ should be assigned starting with irq_entry 1.
		 * irq_entry 0 is for misc interrupts and has no wq association.
		 */
		if (i > 0)
			idxd->irq_entries[i].wq = idxd->wqs[i - 1];
		idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
		idxd->irq_entries[i].int_handle = INVALID_INT_HANDLE;
		if (device_pasid_enabled(idxd) && i > 0)
			idxd->irq_entries[i].pasid = idxd->pasid;
		else
			idxd->irq_entries[i].pasid = INVALID_IOASID;
		spin_lock_init(&idxd->irq_entries[i].list_lock);
	}

	idxd_msix_perm_setup(idxd);

	irq_entry = &idxd->irq_entries[0];
	rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
				  0, "idxd-misc", irq_entry);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}

	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);

	/* first MSI-X entry is not for wq interrupts */
	idxd->num_wq_irqs = msixcnt - 1;

	for (i = 1; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];

		init_llist_head(&idxd->irq_entries[i].pending_llist);
		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
		rc = request_threaded_irq(irq_entry->vector, NULL,
					  idxd_wq_thread, 0, "idxd-portal", irq_entry);
		if (rc < 0) {
			dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
			goto err_wq_irqs;
		}

		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
		if (idxd->request_int_handles) {
			rc = idxd_device_request_int_handle(idxd, i, &irq_entry->int_handle,
							    IDXD_IRQ_MSIX);
			if (rc < 0) {
				free_irq(irq_entry->vector, irq_entry);
				goto err_wq_irqs;
			}
			dev_dbg(dev, "int handle requested: %u\n", irq_entry->int_handle);
		}
	}

	idxd_unmask_error_interrupts(idxd);
	return 0;

err_wq_irqs:
	while (--i >= 0) {
		irq_entry = &idxd->irq_entries[i];
		free_irq(irq_entry->vector, irq_entry);
		if (irq_entry->int_handle != INVALID_INT_HANDLE) {
			idxd_device_release_int_handle(idxd, irq_entry->int_handle,
						       IDXD_IRQ_MSIX);
			irq_entry->int_handle = INVALID_INT_HANDLE;
			irq_entry->pasid = INVALID_IOASID;
		}
		irq_entry->vector = -1;
		irq_entry->wq = NULL;
		irq_entry->idxd = NULL;
	}
err_misc_irq:
	/* Disable error interrupt generation */
	idxd_mask_error_interrupts(idxd);
	idxd_msix_perm_clear(idxd);
err_irq_entries:
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct idxd_irq_entry *irq_entry;
	int i;

	for (i = 0; i < idxd->irq_cnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		if (irq_entry->int_handle != INVALID_INT_HANDLE) {
			idxd_device_release_int_handle(idxd, irq_entry->int_handle,
						       IDXD_IRQ_MSIX);
			irq_entry->int_handle = INVALID_INT_HANDLE;
			irq_entry->pasid = INVALID_IOASID;
		}
		/* free the irq while the vector is still valid, then scrub the entry */
		free_irq(irq_entry->vector, irq_entry);
		irq_entry->vector = -1;
		irq_entry->wq = NULL;
		irq_entry->idxd = NULL;
	}

	idxd_mask_error_interrupts(idxd);
	pci_free_irq_vectors(pdev);
}

static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	struct device *conf_dev;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
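		/* allocate the WQ context and initialize its sysfs conf_dev */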
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
		conf_dev = wq_confdev(wq);
		wq->id = i;
		wq->idxd = idxd;
		device_initialize(wq_confdev(wq));
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_wq_device_type;
		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		init_completion(&wq->wq_resurrect);
		wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
		wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			put_device(conf_dev);
			rc = -ENOMEM;
			goto err;
		}
		idxd->wqs[i] = wq;
	}

	return 0;

err:
	while (--i >= 0) {
		wq = idxd->wqs[i];
		conf_dev = wq_confdev(wq);
		put_device(conf_dev);
	}
	return rc;
}

static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
		conf_dev = engine_confdev(engine);
		engine->id = i;
		engine->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_engine_device_type;
		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

err:
	while (--i >= 0) {
		engine = idxd->engines[i];
		conf_dev = engine_confdev(engine);
		put_device(conf_dev);
	}
	return rc;
}

static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
		conf_dev = group_confdev(group);
		group->id = i;
		group->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_group_device_type;
		rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->groups[i] = group;
		if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
	}

	return 0;

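	/* unwind: drop the conf_dev reference on each group already initialized */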
err:
	while (--i >= 0) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}
	return rc;
}

static void idxd_cleanup_internals(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
	destroy_workqueue(idxd->wq);
}

static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	init_waitqueue_head(&idxd->cmd_waitq);

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		goto err_wqs;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	return 0;

err_wkq_create:
	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
err_group:
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
err_wqs:
	return rc;
}

static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	/* reading command capabilities */
	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
		idxd->request_int_handles = true;

	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
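	/* all tokens start out unreserved and available to the groups */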
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	idxd->nr_tokens = idxd->max_tokens;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}

static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
	struct device *dev = &pdev->dev;
	struct device *conf_dev;
	struct idxd_device *idxd;
	int rc;

	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
	if (!idxd)
		return NULL;

	conf_dev = idxd_confdev(idxd);
	idxd->pdev = pdev;
	idxd->data = data;
	idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
	if (idxd->id < 0)
		return NULL;

	device_initialize(conf_dev);
	conf_dev->parent = dev;
	conf_dev->bus = &dsa_bus_type;
	conf_dev->type = idxd->data->dev_type;
	rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
	if (rc < 0) {
		put_device(conf_dev);
		return NULL;
	}

	spin_lock_init(&idxd->dev_lock);
	spin_lock_init(&idxd->cmd_lock);

	return idxd;
}

static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	int flags;
	unsigned int pasid;
	struct iommu_sva *sva;

	flags = SVM_FLAG_SUPERVISOR_MODE;

	sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
	if (IS_ERR(sva)) {
		dev_warn(&idxd->pdev->dev,
			 "iommu sva bind failed: %ld\n", PTR_ERR(sva));
		return PTR_ERR(sva);
	}

	pasid = iommu_sva_get_pasid(sva);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(sva);
		return -ENODEV;
	}

	idxd->sva = sva;
	idxd->pasid = pasid;
	dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
	return 0;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	iommu_sva_unbind_device(idxd->sva);
	idxd->sva = NULL;
}

static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
		if (rc == 0) {
			rc = idxd_enable_system_pasid(idxd);
			if (rc < 0) {
				iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
				dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
			} else {
				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
			}
		} else {
			dev_warn(dev, "Unable to turn on SVA feature.\n");
		}
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are readonly, then load them from device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err_config;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_config;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	idxd->major = idxd_cdev_get_major(idxd);

	rc = perfmon_pmu_init(idxd);
	if (rc < 0)
		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

err_config:
	idxd_cleanup_internals(idxd);
err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return rc;
}

static void idxd_cleanup(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;

	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}

static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, data);
	if (!idxd) {
		rc = -ENOMEM;
		goto err_idxd_alloc;
	}

	dev_dbg(dev, "Mapping BARs\n");
	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
	if (!idxd->reg_base) {
		rc = -ENOMEM;
		goto err_iomap;
	}

	dev_dbg(dev, "Set DMA masks\n");
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		goto err;

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		goto err;
	}

	rc = idxd_register_devices(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		goto err_dev_register;
	}

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;

err_dev_register:
	idxd_cleanup(idxd);
err:
	pci_iounmap(pdev, idxd->reg_base);
err_iomap:
	put_device(idxd_confdev(idxd));
err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}

static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;

	head = llist_del_all(&ie->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, itr, head, llnode)
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
}

static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *iter;

	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
		list_del(&desc->list);
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
	}
}

void idxd_wqs_quiesce(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
			idxd_wq_quiesce(wq);
	}
}

static void idxd_release_int_handles(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 1; i < idxd->irq_cnt; i++) {
		struct idxd_irq_entry *ie = &idxd->irq_entries[i];

		if (ie->int_handle != INVALID_INT_HANDLE) {
			rc = idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
			if (rc < 0)
				dev_warn(dev, "irq handle %d release failed\n", ie->int_handle);
			else
				dev_dbg(dev, "int handle released: %u\n", ie->int_handle);
		}
	}
}

static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		synchronize_irq(irq_entry->vector);
		if (i == 0)
			continue;
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}
	flush_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);
	int i;

	idxd_unregister_devices(idxd);
	/*
	 * When ->release() is called for the idxd->conf_dev, it frees all the memory related
	 * to the idxd context. The driver still needs those bits in order to do the rest of
	 * the cleanup. However, we do need to unbind the idxd sub-driver. So take a ref
	 * on the device here to hold off the freeing while allowing the idxd sub-driver
	 * to unbind.
	 */
	get_device(idxd_confdev(idxd));
	device_unregister(idxd_confdev(idxd));
	idxd_shutdown(pdev);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		free_irq(irq_entry->vector, irq_entry);
	}
	idxd_msix_perm_clear(idxd);
	idxd_release_int_handles(idxd);
	pci_free_irq_vectors(pdev);
	pci_iounmap(pdev, idxd->reg_base);
	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
	pci_disable_device(pdev);
	destroy_workqueue(idxd->wq);
	perfmon_pmu_remove(idxd);
	put_device(idxd_confdev(idxd));
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};

static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
	 * enumerating the device. We cannot utilize it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	perfmon_init();

	err = idxd_driver_register(&idxd_drv);
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_driver_register(&idxd_dmaengine_drv);
	if (err < 0)
		goto err_idxd_dmaengine_driver_register;

	err = idxd_driver_register(&idxd_user_drv);
	if (err < 0)
		goto err_idxd_user_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_cdev_remove();
err_cdev_register:
	idxd_driver_unregister(&idxd_user_drv);
err_idxd_user_driver_register:
	idxd_driver_unregister(&idxd_dmaengine_drv);
err_idxd_dmaengine_driver_register:
	idxd_driver_unregister(&idxd_drv);
err_idxd_driver_register:
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	idxd_driver_unregister(&idxd_user_drv);
	idxd_driver_unregister(&idxd_dmaengine_drv);
	idxd_driver_unregister(&idxd_drv);
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	perfmon_exit();
}
module_exit(idxd_exit_module);