// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/intel-svm.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
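
/*
 * With 0644 permissions the knob is exposed at
 * /sys/module/idxd/parameters/sva; SVA can also be turned off at load
 * time with standard module-parameter syntax, e.g. "modprobe idxd sva=0",
 * or with "idxd.sva=0" on the kernel command line when the driver is
 * built in.
 */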

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);

static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
	},
};

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
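
/*
 * PCI_DEVICE_DATA() stashes a pointer to the matching idxd_driver_data
 * entry in pci_device_id.driver_data; idxd_pci_probe() below recovers
 * it with a cast:
 *
 *	struct idxd_driver_data *data =
 *			(struct idxd_driver_data *)id->driver_data;
 */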

static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}

	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	/*
	 * We implement one completion list per MSI-X entry, except for
	 * entry 0, which is reserved for errors and other non-I/O events.
	 */
	idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
					 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->irq_entries) {
		rc = -ENOMEM;
		goto err_irq_entries;
	}

	for (i = 0; i < msixcnt; i++) {
		idxd->irq_entries[i].id = i;
		idxd->irq_entries[i].idxd = idxd;
		idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
		spin_lock_init(&idxd->irq_entries[i].list_lock);
	}

	irq_entry = &idxd->irq_entries[0];
	rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
				  0, "idxd-misc", irq_entry);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}

	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);

	/* first MSI-X entry is not for wq interrupts */
	idxd->num_wq_irqs = msixcnt - 1;

	for (i = 1; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];

		init_llist_head(&idxd->irq_entries[i].pending_llist);
		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
		rc = request_threaded_irq(irq_entry->vector, NULL,
					  idxd_wq_thread, 0, "idxd-portal", irq_entry);
		if (rc < 0) {
			dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
			goto err_wq_irqs;
		}

		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
		if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
			/*
			 * MSI-X vector enumeration starts at 1; vector 0 is the
			 * misc interrupt that handles non-I/O completion events.
			 * The interrupt handles are used for IMS enumeration on
			 * a guest. The misc vector does not need a handle, so
			 * int_handles[] starts at index 0: with 'i' starting at
			 * 1, the first handle lands at index 0.
			 */
			rc = idxd_device_request_int_handle(idxd, i, &idxd->int_handles[i - 1],
							    IDXD_IRQ_MSIX);
			if (rc < 0) {
				free_irq(irq_entry->vector, irq_entry);
				goto err_wq_irqs;
			}
			dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i - 1]);
		}
	}

	idxd_unmask_error_interrupts(idxd);
	idxd_msix_perm_setup(idxd);
	return 0;

 err_wq_irqs:
	while (--i >= 0) {
		irq_entry = &idxd->irq_entries[i];
		free_irq(irq_entry->vector, irq_entry);
		if (i != 0)
			idxd_device_release_int_handle(idxd,
						       idxd->int_handles[i], IDXD_IRQ_MSIX);
	}
 err_misc_irq:
	/* Disable error interrupt generation */
	idxd_mask_error_interrupts(idxd);
 err_irq_entries:
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}
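
/*
 * Resulting MSI-X layout for msixcnt == N, as configured above:
 *
 *	irq_entries[0]        "idxd-misc":   errors and other admin events,
 *	                                     no interrupt handle
 *	irq_entries[1..N-1]   "idxd-portal": wq completions, paired with
 *	                                     int_handles[0..N-2] when the
 *	                                     device supports
 *	                                     IDXD_CMD_REQUEST_INT_HANDLE
 */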

static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt <= 0)
		return;

	irq_entry = &idxd->irq_entries[0];
	free_irq(irq_entry->vector, irq_entry);

	for (i = 1; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))
			idxd_device_release_int_handle(idxd, idxd->int_handles[i],
						       IDXD_IRQ_MSIX);
		free_irq(irq_entry->vector, irq_entry);
	}

	idxd_mask_error_interrupts(idxd);
	pci_free_irq_vectors(pdev);
}

static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		wq->id = i;
		wq->idxd = idxd;
		device_initialize(&wq->conf_dev);
		wq->conf_dev.parent = &idxd->conf_dev;
		wq->conf_dev.bus = &dsa_bus_type;
		wq->conf_dev.type = &idxd_wq_device_type;
		rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto err;
		}

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		wq->max_xfer_bytes = idxd->max_xfer_bytes;
		wq->max_batch_size = idxd->max_batch_size;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			put_device(&wq->conf_dev);
			rc = -ENOMEM;
			goto err;
		}
		idxd->wqs[i] = wq;
	}

	return 0;

 err:
	while (--i >= 0)
		put_device(&idxd->wqs[i]->conf_dev);
	return rc;
}
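
/*
 * Cleanup in the error path above runs through put_device() rather than
 * kfree(): once device_initialize() has been called, the conf_dev owns
 * the wq, and dropping the final reference is expected to free it (and
 * its wqcfg) via the device type's release callback. The engine and
 * group setup below follows the same pattern.
 */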

static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		engine->id = i;
		engine->idxd = idxd;
		device_initialize(&engine->conf_dev);
		engine->conf_dev.parent = &idxd->conf_dev;
		engine->conf_dev.bus = &dsa_bus_type;
		engine->conf_dev.type = &idxd_engine_device_type;
		rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

 err:
	while (--i >= 0)
		put_device(&idxd->engines[i]->conf_dev);
	return rc;
}

static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		group->id = i;
		group->idxd = idxd;
		device_initialize(&group->conf_dev);
		group->conf_dev.parent = &idxd->conf_dev;
		group->conf_dev.bus = &dsa_bus_type;
		group->conf_dev.type = &idxd_group_device_type;
		rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto err;
		}

		idxd->groups[i] = group;
		group->tc_a = -1;
		group->tc_b = -1;
	}

	return 0;

 err:
	while (--i >= 0)
		put_device(&idxd->groups[i]->conf_dev);
	return rc;
}

static void idxd_cleanup_internals(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_groups; i++)
		put_device(&idxd->groups[i]->conf_dev);
	for (i = 0; i < idxd->max_engines; i++)
		put_device(&idxd->engines[i]->conf_dev);
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(&idxd->wqs[i]->conf_dev);
	destroy_workqueue(idxd->wq);
}

static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	init_waitqueue_head(&idxd->cmd_waitq);

	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
		idxd->int_handles = devm_kcalloc(dev, idxd->max_wqs, sizeof(int), GFP_KERNEL);
		if (!idxd->int_handles)
			return -ENOMEM;
	}

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		goto err_wqs;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	return 0;

 err_wkq_create:
	for (i = 0; i < idxd->max_groups; i++)
		put_device(&idxd->groups[i]->conf_dev);
 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		put_device(&idxd->engines[i]->conf_dev);
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(&idxd->wqs[i]->conf_dev);
 err_wqs:
	kfree(idxd->int_handles);
	return rc;
}

static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}
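
/*
 * Worked example with hypothetical register values: if the grpcfg field
 * reads 0x4 and IDXD_TABLE_MULT is 0x100 (see registers.h for the
 * authoritative value), the group config table starts at
 * reg_base + 0x400.
 */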

static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	idxd->nr_tokens = idxd->max_tokens;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}
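
/*
 * Example decode with hypothetical register values: a gen_cap with
 * max_xfer_shift == 31 and max_batch_shift == 10 yields
 * max_xfer_bytes = 1ULL << 31 (2 GiB per descriptor) and
 * max_batch_size = 1024 descriptors per batch.
 */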

static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	int rc;

	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
	if (!idxd)
		return NULL;

	idxd->pdev = pdev;
	idxd->data = data;
	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
	if (idxd->id < 0)
		return NULL;

	device_initialize(&idxd->conf_dev);
	idxd->conf_dev.parent = dev;
	idxd->conf_dev.bus = &dsa_bus_type;
	idxd->conf_dev.type = idxd->data->dev_type;
	rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return NULL;
	}

	spin_lock_init(&idxd->dev_lock);
	spin_lock_init(&idxd->cmd_lock);

	return idxd;
}

static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	int flags;
	unsigned int pasid;
	struct iommu_sva *sva;

	flags = SVM_FLAG_SUPERVISOR_MODE;

	sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
	if (IS_ERR(sva)) {
		dev_warn(&idxd->pdev->dev,
			 "iommu sva bind failed: %ld\n", PTR_ERR(sva));
		return PTR_ERR(sva);
	}

	pasid = iommu_sva_get_pasid(sva);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(sva);
		return -ENODEV;
	}

	idxd->sva = sva;
	idxd->pasid = pasid;
	dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
	return 0;
}
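
/*
 * The PASID obtained above is a supervisor ("system") PASID: it lets
 * kernel-owned descriptors target kernel virtual addresses through the
 * IOMMU instead of requiring physical addresses. A minimal sketch of
 * how a submission path might consume it (field names shown for
 * illustration only, not taken from this file):
 *
 *	if (device_pasid_enabled(idxd))
 *		desc->hw->pasid = idxd->pasid;
 */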

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	iommu_sva_unbind_device(idxd->sva);
	idxd->sva = NULL;
}

static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
		if (rc == 0) {
			rc = idxd_enable_system_pasid(idxd);
			if (rc < 0) {
				iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
				dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
			} else {
				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
			}
		} else {
			dev_warn(dev, "Unable to turn on SVA feature.\n");
		}
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are read-only, load them from the device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err_config;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_config;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	idxd->major = idxd_cdev_get_major(idxd);

	rc = perfmon_pmu_init(idxd);
	if (rc < 0)
		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err_config:
	idxd_cleanup_internals(idxd);
 err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return rc;
}

static void idxd_cleanup(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;

	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}

static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, data);
	if (!idxd) {
		rc = -ENOMEM;
		goto err_idxd_alloc;
	}

	dev_dbg(dev, "Mapping BARs\n");
	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
	if (!idxd->reg_base) {
		rc = -ENOMEM;
		goto err_iomap;
	}

	dev_dbg(dev, "Set DMA masks\n");
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		goto err;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		goto err;

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		goto err;
	}

	rc = idxd_register_devices(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		goto err_dev_register;
	}

	idxd->state = IDXD_DEV_CONF_READY;

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;

 err_dev_register:
	idxd_cleanup(idxd);
 err:
	pci_iounmap(pdev, idxd->reg_base);
 err_iomap:
	put_device(&idxd->conf_dev);
 err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}

static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;

	head = llist_del_all(&ie->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, itr, head, llnode) {
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *iter;

	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
		list_del(&desc->list);
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

void idxd_wqs_quiesce(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
			idxd_wq_quiesce(wq);
	}
}

static void idxd_release_int_handles(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->num_wq_irqs; i++) {
		if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)) {
			rc = idxd_device_release_int_handle(idxd, idxd->int_handles[i],
							    IDXD_IRQ_MSIX);
			if (rc < 0)
				dev_warn(dev, "irq handle %d release failed\n",
					 idxd->int_handles[i]);
			else
				dev_dbg(dev, "int handle released: %u\n", idxd->int_handles[i]);
		}
	}
}

static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		synchronize_irq(irq_entry->vector);
		free_irq(irq_entry->vector, irq_entry);
		if (i == 0)
			continue;
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}

	idxd_msix_perm_clear(idxd);
	idxd_release_int_handles(idxd);
	pci_free_irq_vectors(pdev);
	pci_iounmap(pdev, idxd->reg_base);
	pci_disable_device(pdev);
	destroy_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_shutdown(pdev);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	idxd_unregister_devices(idxd);
	perfmon_pmu_remove(idxd);
	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};

static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point
	 * in enumerating the device. We cannot use it.
	 */
	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	if (!boot_cpu_has(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	perfmon_init();

	err = idxd_register_bus_type();
	if (err < 0)
		return err;

	err = idxd_register_driver();
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_cdev_remove();
err_cdev_register:
	idxd_unregister_driver();
err_idxd_driver_register:
	idxd_unregister_bus_type();
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	idxd_unregister_driver();
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_unregister_bus_type();
	perfmon_exit();
}
module_exit(idxd_exit_module);