// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status);

/* Interrupt control bits */
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);

	pci_msi_mask_irq(data);
}

void idxd_mask_msix_vectors(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	int msixcnt = pci_msix_vec_count(pdev);
	int i;

	for (i = 0; i < msixcnt; i++)
		idxd_mask_msix_vector(idxd, i);
}

void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);

	pci_msi_unmask_irq(data);
}

void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

static void free_hw_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);

	kfree(wq->hw_descs);
}

static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, node);
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, node);
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

static void free_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);

	kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, node);
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, node);
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

/* WQ control bits */
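/*
 * Allocate everything a kernel-owned WQ needs to submit work: the
 * hardware descriptors the device parses, the per-descriptor software
 * bookkeeping (struct idxd_desc), a DMA-coherent array of completion
 * records, and an sbitmap queue that hands out free descriptor slots.
 */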
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc, num_descs, i;
	int align;
	u64 tmp;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	wq->num_descs = wq->size;
	num_descs = wq->size;

	rc = alloc_hw_descs(wq, num_descs);
	if (rc < 0)
		return rc;

	if (idxd->type == IDXD_TYPE_DSA)
		align = 32;
	else if (idxd->type == IDXD_TYPE_IAX)
		align = 64;
	else
		return -ENODEV;

	wq->compls_size = num_descs * idxd->compl_size + align;
	wq->compls_raw = dma_alloc_coherent(dev, wq->compls_size,
					    &wq->compls_addr_raw, GFP_KERNEL);
	if (!wq->compls_raw) {
		rc = -ENOMEM;
		goto fail_alloc_compls;
	}

	/*
	 * Round both the DMA address and the kernel pointer up to the
	 * completion-record alignment the device requires (e.g. with
	 * align = 32, 0x1004 rounds up to 0x1020); the extra 'align'
	 * bytes were reserved in the allocation above so the rounded
	 * region still fits.
	 */
	wq->compls_addr = (wq->compls_addr_raw + (align - 1)) & ~(align - 1);
	tmp = (u64)wq->compls_raw;
	tmp = (tmp + (align - 1)) & ~(align - 1);
	wq->compls = (struct dsa_completion_record *)tmp;

	rc = alloc_descs(wq, num_descs);
	if (rc < 0)
		goto fail_alloc_descs;

	rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
				     dev_to_node(dev));
	if (rc < 0)
		goto fail_sbitmap_init;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		if (idxd->type == IDXD_TYPE_DSA)
			desc->completion = &wq->compls[i];
		else if (idxd->type == IDXD_TYPE_IAX)
			desc->iax_completion = &wq->iax_compls[i];
		desc->compl_dma = wq->compls_addr + idxd->compl_size * i;
		desc->id = i;
		desc->wq = wq;
		desc->cpu = -1;
		dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
		desc->txd.tx_submit = idxd_dma_tx_submit;
	}

	return 0;

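	/* Error unwind: undo the allocations above in reverse order. */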
 fail_sbitmap_init:
	free_descs(wq);
 fail_alloc_descs:
	dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
			  wq->compls_addr_raw);
 fail_alloc_compls:
	free_hw_descs(wq);
	return rc;
}

void idxd_wq_free_resources(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_hw_descs(wq);
	free_descs(wq);
	dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
			  wq->compls_addr_raw);
	sbitmap_queue_free(&wq->sbq);
}

int idxd_wq_enable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_ENABLED;
	dev_dbg(dev, "WQ %d enabled\n", wq->id);
	return 0;
}

int idxd_wq_disable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status, operand;

	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

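	/*
	 * The command operand encodes the WQ index as a one-hot bit in
	 * the low 16 bits plus, in bits 31:16, which group of 16 WQs
	 * that bit refers to.
	 */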
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

	if (status != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "WQ disable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);
	return 0;
}

void idxd_wq_drain(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	dev_dbg(dev, "Draining WQ %d\n", wq->id);
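	/* Same WQ-index operand encoding as in idxd_wq_disable() above. */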
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	resource_size_t start;

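	/*
	 * Map this WQ's submission portal: an MMIO window in the portal
	 * BAR through which work descriptors are written to the device.
	 * The offset selects the limited portal for this WQ id.
	 */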
	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

	wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->portal)
		return -ENOMEM;

	return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->portal);
}

int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
{
	struct idxd_device *idxd = wq->idxd;
	int rc;
	union wqcfg wqcfg;
	unsigned int offset;
	unsigned long flags;

	rc = idxd_wq_disable(wq);
	if (rc < 0)
		return rc;

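	/*
	 * With the WQ disabled, read-modify-write just the 32-bit WQCFG
	 * word that holds the PASID fields, then re-enable the WQ.
	 */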
	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 1;
	wqcfg.pasid = pasid;
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}

int idxd_wq_disable_pasid(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	int rc;
	union wqcfg wqcfg;
	unsigned int offset;
	unsigned long flags;

	rc = idxd_wq_disable(wq);
	if (rc < 0)
		return rc;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 0;
	wqcfg.pasid = 0;
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}

void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i, wq_offset;

	lockdep_assert_held(&idxd->dev_lock);
	memset(wq->wqcfg, 0, idxd->wqcfg_size);
	wq->type = IDXD_WQT_NONE;
	wq->size = 0;
	wq->group = NULL;
	wq->threshold = 0;
	wq->priority = 0;
	wq->ats_dis = 0;
	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	memset(wq->name, 0, WQ_NAME_SIZE);

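	/* Zero out this WQ's slice of the WQCFG register space as well. */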
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(0, idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}
}

/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
		return true;
	return false;
}

static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	return (gensts.state == IDXD_DEVICE_STATE_HALT);
}

/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
 * all commands will be done via interrupt completion.
 */
int idxd_device_init_reset(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	union idxd_command_reg cmd;
	unsigned long flags;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		return -ENXIO;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = IDXD_CMD_RESET_DEVICE;
	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
	       IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	return 0;
}

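/*
 * Submit a device command and sleep until the command-completion
 * interrupt fires. The device accepts only one command at a time, so
 * IDXD_FLAG_CMD_RUNNING plus cmd_waitq serialize callers: each waiter
 * sleeps (dropping dev_lock) until the flag clears, issues its command,
 * and is woken from the interrupt path, which completes idxd->cmd_done.
 */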
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status)
{
	union idxd_command_reg cmd;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long flags;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		*status = IDXD_CMDSTS_HW_ERR;
		return;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;
	cmd.operand = operand;
	cmd.int_req = 1;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	wait_event_lock_irq(idxd->cmd_waitq,
			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
			    idxd->dev_lock);

	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);

	idxd->cmd_status = 0;
	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	idxd->cmd_done = &done;
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	/*
	 * After command submitted, release lock and go to sleep until
	 * the command completes via interrupt.
	 */
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	wait_for_completion(&done);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	if (status) {
		*status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
		idxd->cmd_status = *status & GENMASK(7, 0);
	}

	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	/* Wake up other pending commands */
	wake_up(&idxd->cmd_waitq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}

int idxd_device_enable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was enabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_ENABLED;
	return 0;
}

void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	lockdep_assert_held(&idxd->dev_lock);

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			idxd_wq_disable_cleanup(wq);
			wq->state = IDXD_WQ_DISABLED;
		}
	}
}

int idxd_device_disable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;
	unsigned long flags;

	if (!idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device is not enabled\n");
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was disabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	spin_lock_irqsave(&idxd->dev_lock, flags);
	idxd_device_wqs_clear_state(idxd);
	idxd->state = IDXD_DEV_CONF_READY;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	return 0;
}

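/*
 * Full device reset also disables the WQs, so mirror that in software
 * by wiping the WQ shadow state and dropping the device back to the
 * configuration-ready state.
 */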
void idxd_device_reset(struct idxd_device *idxd)
{
	unsigned long flags;

	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	idxd_device_wqs_clear_state(idxd);
	idxd->state = IDXD_DEV_CONF_READY;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}

void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	operand = pasid;
	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
	dev_dbg(dev, "pasid %d drained\n", pasid);
}

/* Device configuration bits */
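/*
 * Push one group's shadow configuration to the device: the GRPWQCFG WQ
 * bitmap (written in 64-bit strides), the GRPENGCFG engine bitmap, and
 * the 32-bit GRPFLAGS word, reading each back for the debug log.
 */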
static void idxd_group_config_write(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i;
	u32 grpcfg_offset;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

	/* setup GRPWQCFG */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* setup GRPENGCFG */
	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* setup GRPFLAGS */
	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset,
		ioread32(idxd->reg_base + grpcfg_offset));
}

static int idxd_groups_config_write(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i;
	struct device *dev = &idxd->pdev->dev;

	/* Setup bandwidth token limit */
	if (idxd->token_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.token_limit = idxd->token_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		idxd_group_config_write(group);
	}

	return 0;
}

static int idxd_wq_config_write(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i;

	if (!wq->group)
		return 0;

	memset(wq->wqcfg, 0, idxd->wqcfg_size);

	/* bytes 0-3 */
	wq->wqcfg->wq_size = wq->size;

	if (wq->size == 0) {
		dev_warn(dev, "Incorrect work queue size: 0\n");
		return -EINVAL;
	}

	/* bytes 4-7 */
	wq->wqcfg->wq_thresh = wq->threshold;

	/* bytes 8-11 */
	wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
	if (wq_dedicated(wq))
		wq->wqcfg->mode = 1;

	if (device_pasid_enabled(idxd)) {
		wq->wqcfg->pasid_en = 1;
		if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
			wq->wqcfg->pasid = idxd->pasid;
	}

	wq->wqcfg->priority = wq->priority;

	if (idxd->hw.gen_cap.block_on_fault &&
	    test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
		wq->wqcfg->bof = 1;

	if (idxd->hw.wq_cap.wq_ats_support)
		wq->wqcfg->wq_ats_disable = wq->ats_dis;

	/* bytes 12-15 */
	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
	wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);

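	/* Flush the shadow WQCFG to the device, one 32-bit stride at a time. */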
	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}

	return 0;
}

static int idxd_wqs_config_write(struct idxd_device *idxd)
{
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		rc = idxd_wq_config_write(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void idxd_group_flags_setup(struct idxd_device *idxd)
{
	int i;

	/* TC-A 0 and TC-B 1 should be defaults */
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_token_limit = group->use_token_limit;
		group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
		if (group->tokens_allowed)
			group->grpcfg.flags.tokens_allowed =
				group->tokens_allowed;
		else
			group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
	}
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
	int i, engines = 0;
	struct idxd_engine *eng;
	struct idxd_group *group;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = &idxd->engines[i];
		group = eng->group;

		if (!group)
			continue;

		group->grpcfg.engines |= BIT(eng->id);
		engines++;
	}

	if (!engines)
		return -EINVAL;

	return 0;
}

static int idxd_wqs_setup(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct idxd_group *group;
	int i, j, configured = 0;
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = &idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;
		if (!wq->size)
			continue;

		if (wq_shared(wq) && !device_swq_supported(idxd)) {
			dev_warn(dev, "No shared wq support but configured.\n");
			return -EINVAL;
		}

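		/*
		 * grpcfg.wqs[] is a 256-bit WQ membership bitmap stored
		 * as four u64 words; word wq->id / 64, bit wq->id % 64
		 * marks this WQ as part of the group.
		 */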
		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
		configured++;
	}

	if (configured == 0)
		return -EINVAL;

	return 0;
}

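/*
 * Program the whole device configuration in dependency order: build the
 * group WQ and engine bitmaps, fill in the group flags, then write the
 * WQ configs followed by the group configs to the hardware.
 */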
int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;

	idxd_group_flags_setup(idxd);

	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_groups_config_write(idxd);
	if (rc < 0)
		return rc;

	return 0;
}