// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}
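
/*
 * Binding is driven from user space through the generic driver model;
 * the bus core calls idxd_config_bus_match() first and, only when it
 * returns 1, idxd_config_bus_probe() below. Illustrative trigger
 * (assumes a first DSA device enumerated as dsa0):
 *
 *   echo dsa0 > /sys/bus/dsa/drivers/dsa/bind
 */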

static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		/* Perform IDXD configuration and enabling */
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		/* Shared WQ checks */
		if (wq_shared(wq)) {
			if (!device_swq_supported(idxd)) {
				dev_warn(dev,
					 "PASID not enabled and shared WQ.\n");
				mutex_unlock(&wq->wq_lock);
				return -ENXIO;
			}
			/*
			 * A shared wq with threshold 0 means the user either
			 * never set a threshold or transitioned from a
			 * dedicated wq without setting one. A value of 0
			 * would effectively disable the shared wq, so the
			 * driver does not allow 0 to be written to threshold
			 * via sysfs.
			 */
			if (wq->threshold == 0) {
				dev_warn(dev,
					 "Shared WQ and threshold 0.\n");
				mutex_unlock(&wq->wq_lock);
				return -EINVAL;
			}
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			/*
			 * Keep the mapping error as this probe's return
			 * value; a successful disable below must not turn
			 * the failure into a success.
			 */
			if (idxd_wq_disable(wq) < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}
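
/*
 * Illustrative end-to-end enable sequence that satisfies the checks in
 * idxd_config_bus_probe() (names assume a first DSA device enumerated as
 * dsa0 with work queue wq0.0):
 *
 *   echo 0         > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *   echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *   echo 16        > /sys/bus/dsa/devices/dsa0/wq0.0/size
 *   echo user      > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *   echo myapp     > /sys/bus/dsa/devices/dsa0/wq0.0/name
 *   echo dsa0      > /sys/bus/dsa/drivers/dsa/bind
 *   echo wq0.0     > /sys/bus/dsa/drivers/dsa/bind
 */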

static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have a claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	rc = idxd_wq_disable(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

static int idxd_config_bus_remove(struct device *dev)
{
	int rc;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d while disabling %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		rc = idxd_device_disable(idxd);
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			mutex_lock(&wq->wq_lock);
			idxd_wq_disable_cleanup(wq);
			mutex_unlock(&wq->wq_lock);
		}
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));

	}

	return 0;
}
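
/*
 * Illustrative teardown, mirroring the enable sequence:
 *
 *   echo wq0.0 > /sys/bus/dsa/drivers/dsa/unbind
 *   echo dsa0  > /sys/bus/dsa/drivers/dsa/unbind
 *
 * Unbinding the device also force-releases any wqs still bound.
 */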

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv
};

struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	/* Unwind only the drivers that registered successfully. */
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);
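
/*
 * Illustrative usage (assumes a first DSA device enumerated as dsa0):
 *
 *   echo 0  > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 *   echo -1 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id   # detach
 *
 * Writes are only honored while the device is configurable.
 */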

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* Group attributes */

static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}
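
/*
 * Worked example with hypothetical numbers: given max_tokens = 96 and two
 * groups reserving 24 and 8 tokens respectively, idxd_set_free_tokens()
 * leaves nr_tokens = 96 - (24 + 8) = 64 tokens in the shared pool.
 */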

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);
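
/*
 * Illustrative example of how the three token knobs combine for group0.0
 * of a hypothetical dsa0:
 *
 *   echo 8  > /sys/bus/dsa/devices/dsa0/group0.0/tokens_reserved
 *   echo 32 > /sys/bus/dsa/devices/dsa0/group0.0/tokens_allowed
 *   echo 1  > /sys/bus/dsa/devices/dsa0/group0.0/use_token_limit
 *
 * tokens_allowed must lie in [4 * num_engines, tokens_reserved +
 * nr_tokens], and use_token_limit can only be set once the device has a
 * non-zero token_limit.
 */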

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
					idxd->id, engine->id);
	}

	/* Replace the trailing space with a newline; guard the empty case. */
	if (rc > 0)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
					idxd->id, wq->id);
	}

	/* Replace the trailing space with a newline; guard the empty case. */
	if (rc > 0)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

/* IDXD work queue attributes */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
			wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);
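
/*
 * Illustrative example: mode can only be changed while the wq is
 * disabled, and "shared" additionally requires PASID/SVM support:
 *
 *   echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *   echo shared    > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 */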

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}
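
/*
 * Worked example with hypothetical numbers: with max_wq_size = 128 and
 * four wqs of 32 entries each, total_claimed_wq_size() returns 128, so a
 * store is accepted only while size + claimed - wq->size <= 128, i.e. a
 * wq must shrink before another may grow.
 */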

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val <= 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
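
/*
 * Illustrative example: threshold is meaningful for shared wqs only and
 * must be between 1 and the wq size; dedicated wqs reject it:
 *
 *   echo shared > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *   echo 10     > /sys/bus/dsa/devices/dsa0/wq0.0/threshold
 */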

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}
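
/*
 * Example: a write of 1000 through this helper is accepted as
 * roundup_pow_of_two(1000) = 1024, while 0 is rejected with -EINVAL.
 */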

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

/* IDXD device attributes */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
			test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);
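
/*
 * Illustrative example (device must be disabled and configurable):
 *
 *   echo 64 > /sys/bus/dsa/devices/dsa0/token_limit
 *
 * The value is capped by the total_tokens reported in the group
 * capability register.
 */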

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}

static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}

int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}
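
/*
 * The helpers above produce a hierarchy along these lines for a first
 * device (illustrative):
 *
 *   /sys/bus/dsa/devices/dsa0/
 *   /sys/bus/dsa/devices/dsa0/wq0.0 ... wq0.<max_wqs - 1>
 *   /sys/bus/dsa/devices/dsa0/group0.0 ... group0.<max_groups - 1>
 *   /sys/bus/dsa/devices/dsa0/engine0.0 ... engine0.<max_engines - 1>
 */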

void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	/* Unwind only the bus types that registered successfully. */
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}