xref: /openbmc/linux/drivers/dma/idxd/device.c (revision 8957261c)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status);
static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
static void idxd_wq_disable_cleanup(struct idxd_wq *wq);

/* Interrupt control bits */
void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 1;
	genctrl.halt_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 0;
	genctrl.halt_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

static void free_hw_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);

	kfree(wq->hw_descs);
}

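/*
 * Allocate the array of hardware descriptor pointers and each descriptor on
 * the device's local NUMA node, presumably to keep descriptor accesses close
 * to the accelerator. On failure the partially filled array is torn down by
 * free_hw_descs(), which tolerates the remaining NULL slots (kfree(NULL) is
 * a no-op).
 */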
static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, node);
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, node);
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

static void free_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);

	kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, node);
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, node);
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

/* WQ control bits */
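/*
 * Allocate everything a kernel-owned WQ needs to submit work: the hardware
 * descriptors, a DMA-coherent array of completion records, the software
 * descriptor bookkeeping, and an sbitmap queue that hands out descriptor
 * slots. num_descs is the WQ size for a dedicated WQ and the threshold for
 * a shared WQ.
 */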
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc, num_descs, i;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
	wq->num_descs = num_descs;

	rc = alloc_hw_descs(wq, num_descs);
	if (rc < 0)
		return rc;

	wq->compls_size = num_descs * idxd->data->compl_size;
	wq->compls = dma_alloc_coherent(dev, wq->compls_size, &wq->compls_addr, GFP_KERNEL);
	if (!wq->compls) {
		rc = -ENOMEM;
		goto fail_alloc_compls;
	}

	rc = alloc_descs(wq, num_descs);
	if (rc < 0)
		goto fail_alloc_descs;

	rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
				     dev_to_node(dev));
	if (rc < 0)
		goto fail_sbitmap_init;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		if (idxd->data->type == IDXD_TYPE_DSA)
			desc->completion = &wq->compls[i];
		else if (idxd->data->type == IDXD_TYPE_IAX)
			desc->iax_completion = &wq->iax_compls[i];
		desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
		desc->id = i;
		desc->wq = wq;
		desc->cpu = -1;
	}

	return 0;

 fail_sbitmap_init:
	free_descs(wq);
 fail_alloc_descs:
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
	free_hw_descs(wq);
	return rc;
}

void idxd_wq_free_resources(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_hw_descs(wq);
	free_descs(wq);
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
	sbitmap_queue_free(&wq->sbq);
}

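/*
 * Enable/disable pair for a single WQ. Both directions are idempotent:
 * enabling an already-enabled WQ or disabling an already-disabled WQ logs a
 * debug message and returns 0 rather than an error.
 */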
int idxd_wq_enable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_ENABLED;
	set_bit(wq->id, idxd->wq_enable_map);
	dev_dbg(dev, "WQ %d enabled\n", wq->id);
	return 0;
}

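/*
 * The Disable/Drain/Reset WQ commands take a WQ bitmask rather than a plain
 * index: bits 15:0 select WQs within a 16-wide window and bits 31:16 hold
 * the window index, hence the BIT(wq->id % 16) | ((wq->id / 16) << 16)
 * operand encoding below. (This reading of the operand layout is inferred
 * from the code, not quoted from the spec.)
 */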
int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status, operand;

	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

	if (status != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "WQ disable failed: %#x\n", status);
		return -ENXIO;
	}

	if (reset_config)
		idxd_wq_disable_cleanup(wq);
	clear_bit(wq->id, idxd->wq_enable_map);
	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);
	return 0;
}

void idxd_wq_drain(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	dev_dbg(dev, "Draining WQ %d\n", wq->id);
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

void idxd_wq_reset(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
	idxd_wq_disable_cleanup(wq);
}

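/*
 * Map the WQ's submission portal, the MMIO window in the WQ BAR that
 * descriptors are written into. Only the limited portal is mapped here.
 * (On DSA-class hardware descriptors are typically submitted to the portal
 * with MOVDIR64B/ENQCMDS, a detail that lives in the submission path, not
 * in this function.)
 */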
int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	resource_size_t start;

	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

	wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->portal)
		return -ENOMEM;

	return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->portal);
	wq->portal = NULL;
	wq->portal_offset = 0;
}

void idxd_wqs_unmap_portal(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->portal)
			idxd_wq_unmap_portal(wq);
	}
}

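/*
 * Read-modify-write the PASID word of the WQCFG register under dev_lock,
 * keeping the driver's shadow copy of the WQCFG in sync with what is
 * written to the hardware.
 */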
static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
{
	struct idxd_device *idxd = wq->idxd;
	union wqcfg wqcfg;
	unsigned int offset;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 1;
	wqcfg.pasid = pasid;
	wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX];
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);
}

int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
{
	int rc;

	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		return rc;

	__idxd_wq_set_pasid_locked(wq, pasid);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}

int idxd_wq_disable_pasid(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	int rc;
	union wqcfg wqcfg;
	unsigned int offset;

	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		return rc;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 0;
	wqcfg.pasid = 0;
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}

static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;

	lockdep_assert_held(&wq->wq_lock);
	wq->state = IDXD_WQ_DISABLED;
	memset(wq->wqcfg, 0, idxd->wqcfg_size);
	wq->type = IDXD_WQT_NONE;
	wq->threshold = 0;
	wq->priority = 0;
	wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
	wq->flags = 0;
	memset(wq->name, 0, WQ_NAME_SIZE);
	wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
	idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
	if (wq->opcap_bmap)
		bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}

static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
{
	lockdep_assert_held(&wq->wq_lock);

	wq->size = 0;
	wq->group = NULL;
}

static void idxd_wq_ref_release(struct percpu_ref *ref)
{
	struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);

	complete(&wq->wq_dead);
}

int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
{
	int rc;

	memset(&wq->wq_active, 0, sizeof(wq->wq_active));
	rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release,
			     PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (rc < 0)
		return rc;
	reinit_completion(&wq->wq_dead);
	reinit_completion(&wq->wq_resurrect);
	return 0;
}

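/*
 * Quiesce a WQ by killing the wq_active percpu_ref and waiting for the last
 * reference to drop, which idxd_wq_ref_release() signals through wq_dead.
 * wq_resurrect is re-armed and then completed around the kill so that
 * submitters blocked on it (in the submission path, outside this file) can
 * bail out instead of waiting forever.
 */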
void __idxd_wq_quiesce(struct idxd_wq *wq)
{
	lockdep_assert_held(&wq->wq_lock);
	reinit_completion(&wq->wq_resurrect);
	percpu_ref_kill(&wq->wq_active);
	complete_all(&wq->wq_resurrect);
	wait_for_completion(&wq->wq_dead);
}

void idxd_wq_quiesce(struct idxd_wq *wq)
{
	mutex_lock(&wq->wq_lock);
	__idxd_wq_quiesce(wq);
	mutex_unlock(&wq->wq_lock);
}

/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
		return true;
	return false;
}

static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	return (gensts.state == IDXD_DEVICE_STATE_HALT);
}

/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
 * all commands will be done via interrupt completion.
 */
int idxd_device_init_reset(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	union idxd_command_reg cmd;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		return -ENXIO;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = IDXD_CMD_RESET_DEVICE;
	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
	       IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	spin_unlock(&idxd->cmd_lock);
	return 0;
}

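/*
 * Submit a device command and sleep until the command-done interrupt fires.
 * The device accepts only one command at a time, so callers serialize on
 * IDXD_FLAG_CMD_RUNNING via cmd_waitq; cmd_lock protects the flag and the
 * command register and is dropped while waiting for the completion.
 */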
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status)
{
	union idxd_command_reg cmd;
	DECLARE_COMPLETION_ONSTACK(done);
	u32 stat;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		if (status)
			*status = IDXD_CMDSTS_HW_ERR;
		return;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;
	cmd.operand = operand;
	cmd.int_req = 1;

	spin_lock(&idxd->cmd_lock);
	wait_event_lock_irq(idxd->cmd_waitq,
			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
			    idxd->cmd_lock);

	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);

	idxd->cmd_status = 0;
	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	idxd->cmd_done = &done;
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	/*
	 * After the command is submitted, release the lock and sleep until
	 * the command completes via interrupt.
	 */
	spin_unlock(&idxd->cmd_lock);
	wait_for_completion(&done);
	stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_lock(&idxd->cmd_lock);
	if (status)
		*status = stat;
	idxd->cmd_status = stat & GENMASK(7, 0);

	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	/* Wake up other pending commands */
	wake_up(&idxd->cmd_waitq);
	spin_unlock(&idxd->cmd_lock);
}

int idxd_device_enable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was enabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_ENABLED;
	return 0;
}

int idxd_device_disable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (!idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device is not enabled\n");
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was disabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd_device_clear_state(idxd);
	return 0;
}

void idxd_device_reset(struct idxd_device *idxd)
{
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
	idxd_device_clear_state(idxd);
	spin_lock(&idxd->dev_lock);
	idxd_unmask_error_interrupts(idxd);
	spin_unlock(&idxd->dev_lock);
}

void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	operand = pasid;
	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
	dev_dbg(dev, "pasid %d drained\n", pasid);
}

int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand, status;

	if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
		return -EOPNOTSUPP;

	dev_dbg(dev, "get int handle, idx %d\n", idx);

	operand = idx & GENMASK(15, 0);
	if (irq_type == IDXD_IRQ_IMS)
		operand |= CMD_INT_HANDLE_IMS;

	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand);

	idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);

	if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "request int handle failed: %#x\n", status);
		return -ENXIO;
	}

	*handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);

	dev_dbg(dev, "int handle acquired: %u\n", *handle);
	return 0;
}

int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand, status;
	union idxd_command_reg cmd;

	if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
		return -EOPNOTSUPP;

	dev_dbg(dev, "release int handle, handle %d\n", handle);

	memset(&cmd, 0, sizeof(cmd));
	operand = handle & GENMASK(15, 0);

	if (irq_type == IDXD_IRQ_IMS)
		operand |= CMD_INT_HANDLE_IMS;

	cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE;
	cmd.operand = operand;

	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);

	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_unlock(&idxd->cmd_lock);

	if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "release int handle failed: %#x\n", status);
		return -ENXIO;
	}

	dev_dbg(dev, "int handle released.\n");
	return 0;
}

/* Device configuration bits */
static void idxd_engines_clear_state(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		engine->group = NULL;
	}
}

static void idxd_groups_clear_state(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		memset(&group->grpcfg, 0, sizeof(group->grpcfg));
		group->num_engines = 0;
		group->num_wqs = 0;
		group->use_rdbuf_limit = false;
		/*
		 * The default value is the same as the value of
		 * total read buffers in GRPCAP.
		 */
		group->rdbufs_allowed = idxd->max_rdbufs;
		group->rdbufs_reserved = 0;
		if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
		group->desc_progress_limit = 0;
		group->batch_progress_limit = 0;
	}
}

static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		mutex_lock(&wq->wq_lock);
		idxd_wq_disable_cleanup(wq);
		idxd_wq_device_reset_cleanup(wq);
		mutex_unlock(&wq->wq_lock);
	}
}

void idxd_device_clear_state(struct idxd_device *idxd)
{
	/* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		/*
		 * Clearing wq state is protected by the wq lock, so there is
		 * no need to also hold the device lock here.
		 */
		idxd_device_wqs_clear_state(idxd);

		spin_lock(&idxd->dev_lock);
		idxd_groups_clear_state(idxd);
		idxd_engines_clear_state(idxd);
	} else {
		spin_lock(&idxd->dev_lock);
	}

	idxd->state = IDXD_DEV_DISABLED;
	spin_unlock(&idxd->dev_lock);
}

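/*
 * Set up the event log (EVL): allocate a DMA-coherent log buffer and a
 * tracking bitmap, program EVLCFG with the buffer address and size, then
 * enable the event log interrupt and the log itself. Everything here is
 * undone by idxd_device_evl_free() below.
 */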
static int idxd_device_evl_setup(struct idxd_device *idxd)
{
	union gencfg_reg gencfg;
	union evlcfg_reg evlcfg;
	union genctrl_reg genctrl;
	struct device *dev = &idxd->pdev->dev;
	void *addr;
	dma_addr_t dma_addr;
	int size;
	struct idxd_evl *evl = idxd->evl;
	unsigned long *bmap;
	int rc;

	if (!evl)
		return 0;

	size = evl_size(idxd);

	bmap = bitmap_zalloc(size, GFP_KERNEL);
	if (!bmap) {
		rc = -ENOMEM;
		goto err_bmap;
	}

	/*
	 * The address needs to be page aligned. dma_alloc_coherent() returns
	 * an address aligned to at least the page size, so no manual
	 * alignment is required.
	 */
	addr = dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
	if (!addr) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	spin_lock(&evl->lock);
	evl->log = addr;
	evl->dma = dma_addr;
	evl->log_size = size;
	evl->bmap = bmap;

	memset(&evlcfg, 0, sizeof(evlcfg));
	evlcfg.bits[0] = dma_addr & GENMASK(63, 12);
	evlcfg.size = evl->size;

	iowrite64(evlcfg.bits[0], idxd->reg_base + IDXD_EVLCFG_OFFSET);
	iowrite64(evlcfg.bits[1], idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.evl_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);

	gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	gencfg.evl_en = 1;
	iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);

	spin_unlock(&evl->lock);
	return 0;

err_alloc:
	bitmap_free(bmap);
err_bmap:
	return rc;
}

static void idxd_device_evl_free(struct idxd_device *idxd)
{
	union gencfg_reg gencfg;
	union genctrl_reg genctrl;
	struct device *dev = &idxd->pdev->dev;
	struct idxd_evl *evl = idxd->evl;

	gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	if (!gencfg.evl_en)
		return;

	spin_lock(&evl->lock);
	gencfg.evl_en = 0;
	iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.evl_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);

	iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
	iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);

	dma_free_coherent(dev, evl->log_size, evl->log, evl->dma);
	bitmap_free(evl->bmap);
	evl->log = NULL;
	evl->size = IDXD_EVL_SIZE_MIN;
	spin_unlock(&evl->lock);
}

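/*
 * Push one group's shadow configuration to the hardware: the GRPWQCFG WQ
 * bitmask strides, the GRPENGCFG engine bitmask, and the GRPFLGCFG flags.
 * Each register is read back only for the debug trace.
 */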
static void idxd_group_config_write(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i;
	u32 grpcfg_offset;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

	/* setup GRPWQCFG */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* setup GRPENGCFG */
	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* setup GRPFLAGS */
	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	iowrite64(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
		group->id, grpcfg_offset,
		ioread64(idxd->reg_base + grpcfg_offset));
}

static int idxd_groups_config_write(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i;
	struct device *dev = &idxd->pdev->dev;

	/* Setup bandwidth rdbuf limit */
	if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.rdbuf_limit = idxd->rdbuf_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		idxd_group_config_write(group);
	}

	return 0;
}

static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
		return true;
	return false;
}

static int idxd_wq_config_write(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i, n;

	if (!wq->group)
		return 0;

	/*
	 * Instead of memsetting the entire shadow copy of the WQCFG, copy
	 * from the hardware after wq reset. This copies back the sticky
	 * values that are present on some devices.
	 */
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset);
	}

	if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
		wq->size = WQ_DEFAULT_QUEUE_DEPTH;

	/* byte 0-3 */
	wq->wqcfg->wq_size = wq->size;

	/* bytes 4-7 */
	wq->wqcfg->wq_thresh = wq->threshold;

	/* byte 8-11 */
	if (wq_dedicated(wq))
		wq->wqcfg->mode = 1;

	/*
	 * The WQ priv bit is set depending on the WQ type. priv = 1 if the
	 * WQ type is kernel, to indicate privileged access. This setting only
	 * matters for a dedicated WQ. According to the DSA spec:
	 * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
	 * Privileged Mode Enable field of the PCI Express PASID capability
	 * is 0, this field must be 0.
	 *
	 * A dedicated kernel WQ that cannot support the PASID privileged
	 * mode capability therefore has its configuration rejected.
	 */
	if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
	    !idxd_device_pasid_priv_enabled(idxd) &&
	    wq->type == IDXD_WQT_KERNEL) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
		return -EOPNOTSUPP;
	}

	wq->wqcfg->priority = wq->priority;

	if (idxd->hw.gen_cap.block_on_fault &&
	    test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags) &&
	    !test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags))
		wq->wqcfg->bof = 1;

	if (idxd->hw.wq_cap.wq_ats_support)
		wq->wqcfg->wq_ats_disable = test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);

	if (idxd->hw.wq_cap.wq_prs_support)
		wq->wqcfg->wq_prs_disable = test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);

	/* bytes 12-15 */
	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
	idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));

	/* bytes 32-63 */
	if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
		memset(wq->wqcfg->op_config, 0, IDXD_MAX_OPCAP_BITS / 8);
		for_each_set_bit(n, wq->opcap_bmap, IDXD_MAX_OPCAP_BITS) {
			int pos = n % BITS_PER_LONG_LONG;
			int idx = n / BITS_PER_LONG_LONG;

			wq->wqcfg->op_config[idx] |= BIT(pos);
		}
	}

	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}

	return 0;
}

static int idxd_wqs_config_write(struct idxd_device *idxd)
{
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = idxd_wq_config_write(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void idxd_group_flags_setup(struct idxd_device *idxd)
{
	int i;

	/* TC-A 0 and TC-B 1 should be defaults */
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
		group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
		group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
		group->grpcfg.flags.desc_progress_limit = group->desc_progress_limit;
		group->grpcfg.flags.batch_progress_limit = group->batch_progress_limit;
	}
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
	int i, engines = 0;
	struct idxd_engine *eng;
	struct idxd_group *group;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = idxd->engines[i];
		group = eng->group;

		if (!group)
			continue;

		group->grpcfg.engines |= BIT(eng->id);
		engines++;
	}

	if (!engines)
		return -EINVAL;

	return 0;
}

static int idxd_wqs_setup(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct idxd_group *group;
	int i, j, configured = 0;
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;

		if (wq_shared(wq) && !wq_shared_supported(wq)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
			dev_warn(dev, "No shared wq support but configured.\n");
			return -EINVAL;
		}

		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
		configured++;
	}

	if (configured == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
		return -EINVAL;
	}

	return 0;
}

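/*
 * Build the shadow configuration and push it to the device in dependency
 * order: gather and validate the WQ and engine group assignments, fill in
 * the group flags, then write the WQ configs followed by the group configs.
 * Callers must hold dev_lock.
 */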
int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;

	idxd_group_flags_setup(idxd);

	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_groups_config_write(idxd);
	if (rc < 0)
		return rc;

	return 0;
}

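/*
 * Read-only configuration support: when the device is not configurable by
 * software, recover the WQ and group state that was already programmed
 * (e.g. by firmware) by reading the config registers back into the
 * driver's shadow structures.
 */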
static int idxd_wq_load_config(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int wqcfg_offset;
	int i;

	wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
	memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);

	wq->size = wq->wqcfg->wq_size;
	wq->threshold = wq->wqcfg->wq_thresh;

	/* The driver does not support shared WQ mode in read-only config yet */
	if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
		return -EOPNOTSUPP;

	set_bit(WQ_FLAG_DEDICATED, &wq->flags);

	wq->priority = wq->wqcfg->priority;

	wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
	idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift);

	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
	}

	return 0;
}

static void idxd_group_load_config(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i, j, grpcfg_offset;

	/*
	 * Load the WQS bit fields.
	 * Iterate through all 256 bits, 64 bits at a time.
	 */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		struct idxd_wq *wq;

		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);

		if (i * 64 >= idxd->max_wqs)
			break;

		/* Iterate through all 64 bits and check for wq set */
		for (j = 0; j < 64; j++) {
			int id = i * 64 + j;

			/* No need to check beyond max wqs */
			if (id >= idxd->max_wqs)
				break;

			/* Set group assignment for wq if wq bit is set */
			if (group->grpcfg.wqs[i] & BIT(j)) {
				wq = idxd->wqs[id];
				wq->group = group;
			}
		}
	}

	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, group->grpcfg.engines);

	/* Iterate through all 64 bits to check engines set */
	for (i = 0; i < 64; i++) {
		if (i >= idxd->max_engines)
			break;

		if (group->grpcfg.engines & BIT(i)) {
			struct idxd_engine *engine = idxd->engines[i];

			engine->group = group;
		}
	}

	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	group->grpcfg.flags.bits = ioread64(idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
		group->id, grpcfg_offset, group->grpcfg.flags.bits);
}

int idxd_device_load_config(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i, rc;

	reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	idxd->rdbuf_limit = reg.rdbuf_limit;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		idxd_group_load_config(group);
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = idxd_wq_load_config(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

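/*
 * Flush every descriptor still owned by this interrupt entry: pull the
 * pending llist and the work list into a local list under list_lock, then
 * complete each descriptor as either a normal or an aborted completion
 * depending on whether the hardware already wrote its completion status.
 */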
static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;
	LIST_HEAD(flist);
	enum idxd_complete_type ctype;

	spin_lock(&ie->list_lock);
	head = llist_del_all(&ie->pending_llist);
	if (head) {
		llist_for_each_entry_safe(desc, itr, head, llnode)
			list_add_tail(&desc->list, &ie->work_list);
	}

	list_for_each_entry_safe(desc, itr, &ie->work_list, list)
		list_move_tail(&desc->list, &flist);
	spin_unlock(&ie->list_lock);

	list_for_each_entry_safe(desc, itr, &flist, list) {
		struct dma_async_tx_descriptor *tx;

		list_del(&desc->list);
		ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
		/*
		 * The wq is being disabled. Any remaining descriptors are
		 * likely to be stuck and can be dropped. The callback could
		 * point to code that is no longer accessible, for example
		 * if the dmatest module has been unloaded.
		 */
		tx = &desc->txd;
		tx->callback = NULL;
		tx->callback_result = NULL;
		idxd_dma_complete_txd(desc, ctype, true);
	}
}

static void idxd_device_set_perm_entry(struct idxd_device *idxd,
				       struct idxd_irq_entry *ie)
{
	union msix_perm mperm;

	if (ie->pasid == IOMMU_PASID_INVALID)
		return;

	mperm.bits = 0;
	mperm.pasid = ie->pasid;
	mperm.pasid_en = 1;
	iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}

static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
					 struct idxd_irq_entry *ie)
{
	iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}

void idxd_wq_free_irq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_irq_entry *ie = &wq->ie;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_irq(ie->vector, ie);
	idxd_flush_pending_descs(ie);
	if (idxd->request_int_handles)
		idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
	idxd_device_clear_perm_entry(idxd, ie);
	ie->vector = -1;
	ie->int_handle = INVALID_INT_HANDLE;
	ie->pasid = IOMMU_PASID_INVALID;
}

int idxd_wq_request_irq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *ie;
	int rc;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	ie = &wq->ie;
	ie->vector = pci_irq_vector(pdev, ie->id);
	ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : IOMMU_PASID_INVALID;
	idxd_device_set_perm_entry(idxd, ie);

	rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
	if (rc < 0) {
		dev_err(dev, "Failed to request irq %d.\n", ie->vector);
		goto err_irq;
	}

	if (idxd->request_int_handles) {
		rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
						    IDXD_IRQ_MSIX);
		if (rc < 0)
			goto err_int_handle;
	} else {
		ie->int_handle = ie->id;
	}

	return 0;

err_int_handle:
	ie->int_handle = INVALID_INT_HANDLE;
	free_irq(ie->vector, ie);
err_irq:
	idxd_device_clear_perm_entry(idxd, ie);
	ie->pasid = IOMMU_PASID_INVALID;
	return rc;
}

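/*
 * Driver-level WQ enable: validate the sysfs-supplied configuration (group
 * membership, name, shared-WQ threshold), program the PASID if applicable,
 * write the device config when the device is configurable, then enable the
 * WQ and set up its portal mapping, IRQ, descriptor resources, and
 * percpu_ref. The error labels unwind in reverse order. Called with wq_lock
 * held.
 */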
int drv_enable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc = -ENXIO;

	lockdep_assert_held(&wq->wq_lock);

	if (idxd->state != IDXD_DEV_ENABLED) {
		idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
		goto err;
	}

	if (wq->state != IDXD_WQ_DISABLED) {
		dev_dbg(dev, "wq %d already enabled.\n", wq->id);
		idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
		rc = -EBUSY;
		goto err;
	}

	if (!wq->group) {
		dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
		idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
		goto err;
	}

	if (strlen(wq->name) == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
		dev_dbg(dev, "wq %d name not set.\n", wq->id);
		goto err;
	}

	/* Shared WQ checks */
	if (wq_shared(wq)) {
		if (!wq_shared_supported(wq)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
			dev_dbg(dev, "PASID not enabled and shared wq.\n");
			goto err;
		}
		/*
		 * A shared wq with the threshold set to 0 means the user did
		 * not set a threshold or transitioned from a dedicated wq
		 * without setting one. A value of 0 would effectively disable
		 * the shared wq, so the driver does not allow 0 to be set
		 * for the threshold via sysfs.
		 */
		if (wq->threshold == 0) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
			dev_dbg(dev, "Shared wq and threshold 0.\n");
			goto err;
		}
	}

	/*
	 * In the event that the WQ is configurable for pasid, the driver
	 * should set up the pasid and the pasid_en bit. This is true for
	 * both kernel and user shared workqueues. There is no need to set
	 * the priv bit, since in-kernel DMA will also perform user
	 * privileged requests. A dedicated wq that is not of 'kernel' type
	 * will configure pasid and pasid_en later on, so there is no need
	 * to set them up here.
	 */
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		if (wq_pasid_enabled(wq)) {
			if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
				u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;

				__idxd_wq_set_pasid_locked(wq, pasid);
			}
		}
	}

	rc = 0;
	spin_lock(&idxd->dev_lock);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock(&idxd->dev_lock);
	if (rc < 0) {
		dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
		goto err;
	}

	rc = idxd_wq_enable(wq);
	if (rc < 0) {
		dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
		goto err;
	}

	rc = idxd_wq_map_portal(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
		dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
		goto err_map_portal;
	}

	wq->client_count = 0;

	rc = idxd_wq_request_irq(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
		dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
		goto err_irq;
	}

	rc = idxd_wq_alloc_resources(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
		dev_dbg(dev, "WQ resource alloc failed\n");
		goto err_res_alloc;
	}

	rc = idxd_wq_init_percpu_ref(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
		dev_dbg(dev, "percpu_ref setup failed\n");
		goto err_ref;
	}

	return 0;

err_ref:
	idxd_wq_free_resources(wq);
err_res_alloc:
	idxd_wq_free_irq(wq);
err_irq:
	idxd_wq_unmap_portal(wq);
err_map_portal:
	if (idxd_wq_disable(wq, false))
		dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
err:
	return rc;
}

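/*
 * Driver-level WQ disable, the mirror of drv_enable_wq(): unmap the portal
 * first (presumably so no new descriptors can be submitted), drain
 * outstanding work, free the IRQ and flush its descriptors, then reset the
 * WQ and release its resources. Called with wq_lock held.
 */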
void drv_disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	lockdep_assert_held(&wq->wq_lock);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);
	idxd_wq_drain(wq);
	idxd_wq_free_irq(wq);
	idxd_wq_reset(wq);
	idxd_wq_free_resources(wq);
	percpu_ref_exit(&wq->wq_active);
	wq->type = IDXD_WQT_NONE;
	wq->client_count = 0;
}

int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
	struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
	int rc = 0;

	/*
	 * The device should be in the disabled state for the idxd_drv to
	 * load. If it is in the enabled state, the device was altered
	 * outside of the driver's control. If the device is halted, we do
	 * not want to proceed.
	 */
	if (idxd->state != IDXD_DEV_DISABLED) {
		idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
		return -ENXIO;
	}

	/* Device configuration */
	spin_lock(&idxd->dev_lock);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock(&idxd->dev_lock);
	if (rc < 0)
		return -ENXIO;

	/*
	 * System PASID is preserved across device disable/enable cycle, but
	 * genconfig register content gets cleared during device reset. We
	 * need to re-enable user interrupts for kernel work queue completion
	 * IRQ to function.
	 */
	if (idxd->pasid != IOMMU_PASID_INVALID)
		idxd_set_user_intr(idxd, 1);

	rc = idxd_device_evl_setup(idxd);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;
		return rc;
	}

	/* Start device */
	rc = idxd_device_enable(idxd);
	if (rc < 0) {
		idxd_device_evl_free(idxd);
		return rc;
	}

	/* Setup DMA device without channels */
	rc = idxd_register_dma_device(idxd);
	if (rc < 0) {
		idxd_device_disable(idxd);
		idxd_device_evl_free(idxd);
		idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
		return rc;
	}

	idxd->cmd_status = 0;
	return 0;
}

void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
{
	struct device *dev = &idxd_dev->conf_dev;
	struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];
		struct device *wq_dev = wq_confdev(wq);

		if (wq->state == IDXD_WQ_DISABLED)
			continue;
		dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev));
		device_release_driver(wq_dev);
	}

	idxd_unregister_dma_device(idxd);
	idxd_device_disable(idxd);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		idxd_device_reset(idxd);
	idxd_device_evl_free(idxd);
}

static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_DSA,
	IDXD_DEV_IAX,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_drv = {
	.type = dev_types,
	.probe = idxd_device_drv_probe,
	.remove = idxd_device_drv_remove,
	.name = "idxd",
};
EXPORT_SYMBOL_GPL(idxd_drv);