xref: /openbmc/linux/drivers/dma/idxd/init.c (revision 8f47d1a5e545f903cd049c42da31a3be36178447)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

#define DRV_NAME "idxd"

static struct idr idxd_idrs[IDXD_TYPE_MAX];
static struct mutex idxd_idr_lock;

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static char *idxd_name[] = {
	"dsa",
};

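/* Return the human-readable name for this device's type ("dsa"). */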
const char *idxd_get_dev_name(struct idxd_device *idxd)
{
	return idxd_name[idxd->type];
}

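/*
 * Set up MSI-X for the device: vector 0 services error and other
 * miscellaneous notifications, and every remaining vector services one
 * work queue completion list.
 */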
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		rc = -ENOSPC;
		goto err_no_irq;
	}

	idxd->msix_entries = devm_kcalloc(dev, msixcnt,
					  sizeof(struct msix_entry),
					  GFP_KERNEL);
	if (!idxd->msix_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++)
		idxd->msix_entries[i].entry = i;

	rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
	if (rc) {
		dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
		goto err_no_irq;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	/*
	 * We implement one completion list per MSI-X entry, except for
	 * entry 0, which is reserved for device error and other
	 * non-completion notifications.
	 */
	idxd->irq_entries = devm_kcalloc(dev, msixcnt,
					 sizeof(struct idxd_irq_entry),
					 GFP_KERNEL);
	if (!idxd->irq_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++) {
		idxd->irq_entries[i].id = i;
		idxd->irq_entries[i].idxd = idxd;
	}

	msix = &idxd->msix_entries[0];
	irq_entry = &idxd->irq_entries[0];
	rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
				       idxd_misc_thread, 0, "idxd-misc",
				       irq_entry);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_no_irq;
	}

	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
		msix->vector);

	/* first MSI-X entry is not for wq interrupts */
	idxd->num_wq_irqs = msixcnt - 1;

	for (i = 1; i < msixcnt; i++) {
		msix = &idxd->msix_entries[i];
		irq_entry = &idxd->irq_entries[i];

		init_llist_head(&idxd->irq_entries[i].pending_llist);
		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
		rc = devm_request_threaded_irq(dev, msix->vector,
					       idxd_irq_handler,
					       idxd_wq_thread, 0,
					       "idxd-portal", irq_entry);
		if (rc < 0) {
			dev_err(dev, "Failed to allocate irq %d.\n",
				msix->vector);
			goto err_no_irq;
		}
		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
			i, msix->vector);
	}

	idxd_unmask_error_interrupts(idxd);

	return 0;

 err_no_irq:
	/* Disable error interrupt generation */
	idxd_mask_error_interrupts(idxd);
	pci_disable_msix(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

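/* Free the per-wq submit_lock rwsems set up in idxd_setup_internals(). */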
static void idxd_wqs_free_lock(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		percpu_free_rwsem(&wq->submit_lock);
	}
}

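/*
 * Allocate and initialize the software state for the groups, work
 * queues, and engines that the capability registers report.
 */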
static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	idxd->groups = devm_kcalloc(dev, idxd->max_groups,
				    sizeof(struct idxd_group), GFP_KERNEL);
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		idxd->groups[i].idxd = idxd;
		idxd->groups[i].id = i;
		idxd->groups[i].tc_a = -1;
		idxd->groups[i].tc_b = -1;
	}

	idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
				 GFP_KERNEL);
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->engines = devm_kcalloc(dev, idxd->max_engines,
				     sizeof(struct idxd_engine), GFP_KERNEL);
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];
		int rc;

		wq->id = i;
		wq->idxd = idxd;
		mutex_init(&wq->wq_lock);
		atomic_set(&wq->dq_count, 0);
		init_waitqueue_head(&wq->submit_waitq);
		rc = percpu_init_rwsem(&wq->submit_lock);
		if (rc < 0) {
			idxd_wqs_free_lock(idxd);
			return rc;
		}
	}

	for (i = 0; i < idxd->max_engines; i++) {
		idxd->engines[i].idxd = idxd;
		idxd->engines[i].id = i;
	}

	return 0;
}

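/*
 * Read the table offset register. Each offset field is in multiples of
 * 0x100 bytes from the start of the device's MMIO space.
 */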
static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
			+ sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * 0x100;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * 0x100;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
		idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * 0x100;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
		idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * 0x100;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

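/* Cache the hardware capability registers into driver-friendly fields. */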
static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	idxd->nr_tokens = idxd->max_tokens;

	/* reading engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* reading workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}

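/* Allocate the per-device driver context and attach its mapped BAR. */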
static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
				      void __iomem * const *iomap)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;

	idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
	if (!idxd)
		return NULL;

	idxd->pdev = pdev;
	idxd->reg_base = iomap[IDXD_MMIO_BAR];
	spin_lock_init(&idxd->dev_lock);

	return idxd;
}

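/*
 * Device-level probe: reset the device, read its capabilities and
 * config table offsets, set up software state and interrupts, and
 * allocate a device id.
 */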
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_reset(idxd);
	if (rc < 0)
		return rc;
	dev_dbg(dev, "IDXD reset complete\n");

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err_setup;

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_setup;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	mutex_lock(&idxd_idr_lock);
	idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
	mutex_unlock(&idxd_idr_lock);
	if (idxd->id < 0) {
		rc = -ENOMEM;
		goto err_idr_fail;
	}

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err_idr_fail:
	idxd_mask_error_interrupts(idxd);
	idxd_mask_msix_vectors(idxd);
 err_setup:
	return rc;
}

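/*
 * PCI probe entry point: enable the device, map the MMIO BAR, set the
 * DMA masks, then hand off to idxd_probe() and register sysfs entries.
 */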
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	int rc;
	unsigned int mask;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Mapping BARs\n");
	mask = (1 << IDXD_MMIO_BAR);
	rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
	if (rc)
		return rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	dev_dbg(dev, "Set DMA masks\n");
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, iomap);
	if (!idxd)
		return -ENOMEM;

	idxd_set_type(idxd);

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		return -ENODEV;
	}

	rc = idxd_setup_sysfs(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		return -ENODEV;
	}

	idxd->state = IDXD_DEV_CONF_READY;

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;
}

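/*
 * Abort any descriptors still on the interrupt entry's lockless pending
 * list and return them to their work queue.
 */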
static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;

	head = llist_del_all(&ie->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, itr, head, llnode) {
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

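/* Abort and free any descriptors remaining on the entry's work list. */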
static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *iter;

	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
		list_del(&desc->list);
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

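/*
 * Disable the device, mask its interrupts, and abort descriptors still
 * outstanding on the per-vector pending and work lists.
 */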
static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = idxd_device_disable(idxd);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		synchronize_irq(idxd->msix_entries[i].vector);
		if (i == 0)
			continue;
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}
}

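/* Undo probe: remove sysfs entries, quiesce the device, drop its id. */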
static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_cleanup_sysfs(idxd);
	idxd_shutdown(pdev);
	idxd_wqs_free_lock(idxd);
	mutex_lock(&idxd_idr_lock);
	idr_remove(&idxd_idrs[idxd->type], idxd->id);
	mutex_unlock(&idxd_idr_lock);
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};

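/*
 * Module init: register the idxd bus type, the device drivers on that
 * bus, and the PCI driver, unwinding in reverse order on failure.
 */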
static int __init idxd_init_module(void)
{
	int err, i;

	/*
	 * If the CPU does not support MOVDIR64B (a 512-bit write), there
	 * is no point in enumerating the device; we cannot drive it.
	 */
	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
		DRV_NAME, IDXD_DRIVER_VERSION);

	mutex_init(&idxd_idr_lock);
	for (i = 0; i < IDXD_TYPE_MAX; i++)
		idr_init(&idxd_idrs[i]);

	err = idxd_register_bus_type();
	if (err < 0)
		return err;

	err = idxd_register_driver();
	if (err < 0)
		goto err_idxd_driver_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_unregister_driver();
err_idxd_driver_register:
	idxd_unregister_bus_type();
	return err;
}
module_init(idxd_init_module);

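/* Module exit: unwind everything idxd_init_module() registered. */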
static void __exit idxd_exit_module(void)
{
	idxd_unregister_driver();
	pci_unregister_driver(&idxd_pci_driver);
	idxd_unregister_bus_type();
}
module_exit(idxd_exit_module);