xref: /openbmc/linux/drivers/dma/idxd/sysfs.c (revision b9dd2add)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
3 #include <linux/init.h>
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/pci.h>
7 #include <linux/device.h>
8 #include <linux/io-64-nonatomic-lo-hi.h>
9 #include <uapi/linux/idxd.h>
10 #include "registers.h"
11 #include "idxd.h"
12 
/* Names exposed via the wq "type" sysfs attribute, indexed by enum idxd_wq_type. */
static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};
18 
/*
 * Release callback shared by all configuration devices below.  The
 * conf_dev objects are embedded in idxd/group/wq/engine structures whose
 * memory is managed elsewhere, so only a debug trace is emitted here.
 */
static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}
23 
/*
 * Device types for the configuration devices hanging off the dsa/iax
 * buses.  dev->type is used by the is_*_dev() helpers below to tell
 * group, wq, engine, and top-level device nodes apart in the bus
 * match/probe/remove callbacks.
 */
static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
};
48 
49 static inline bool is_dsa_dev(struct device *dev)
50 {
51 	return dev ? dev->type == &dsa_device_type : false;
52 }
53 
54 static inline bool is_iax_dev(struct device *dev)
55 {
56 	return dev ? dev->type == &iax_device_type : false;
57 }
58 
59 static inline bool is_idxd_dev(struct device *dev)
60 {
61 	return is_dsa_dev(dev) || is_iax_dev(dev);
62 }
63 
64 static inline bool is_idxd_wq_dev(struct device *dev)
65 {
66 	return dev ? dev->type == &idxd_wq_device_type : false;
67 }
68 
69 static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
70 {
71 	if (wq->type == IDXD_WQT_KERNEL &&
72 	    strcmp(wq->name, "dmaengine") == 0)
73 		return true;
74 	return false;
75 }
76 
/* User-type wqs are exposed to userspace through a character device. */
static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}
81 
/*
 * Bus match callback: decide whether a config device may bind to the
 * (single) configuration driver.  A device must have finished its
 * initial configuration; a wq must be disabled and its parent device at
 * least configured.  Returns 1 on match, 0 otherwise.
 */
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		/* device must be done with initial configuration */
		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		/* parent device must be configured (or further along) */
		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}
112 
113 static int idxd_config_bus_probe(struct device *dev)
114 {
115 	int rc;
116 	unsigned long flags;
117 
118 	dev_dbg(dev, "%s called\n", __func__);
119 
120 	if (is_idxd_dev(dev)) {
121 		struct idxd_device *idxd = confdev_to_idxd(dev);
122 
123 		if (idxd->state != IDXD_DEV_CONF_READY) {
124 			dev_warn(dev, "Device not ready for config\n");
125 			return -EBUSY;
126 		}
127 
128 		if (!try_module_get(THIS_MODULE))
129 			return -ENXIO;
130 
131 		/* Perform IDXD configuration and enabling */
132 		spin_lock_irqsave(&idxd->dev_lock, flags);
133 		rc = idxd_device_config(idxd);
134 		spin_unlock_irqrestore(&idxd->dev_lock, flags);
135 		if (rc < 0) {
136 			module_put(THIS_MODULE);
137 			dev_warn(dev, "Device config failed: %d\n", rc);
138 			return rc;
139 		}
140 
141 		/* start device */
142 		rc = idxd_device_enable(idxd);
143 		if (rc < 0) {
144 			module_put(THIS_MODULE);
145 			dev_warn(dev, "Device enable failed: %d\n", rc);
146 			return rc;
147 		}
148 
149 		dev_info(dev, "Device %s enabled\n", dev_name(dev));
150 
151 		rc = idxd_register_dma_device(idxd);
152 		if (rc < 0) {
153 			module_put(THIS_MODULE);
154 			dev_dbg(dev, "Failed to register dmaengine device\n");
155 			return rc;
156 		}
157 		return 0;
158 	} else if (is_idxd_wq_dev(dev)) {
159 		struct idxd_wq *wq = confdev_to_wq(dev);
160 		struct idxd_device *idxd = wq->idxd;
161 
162 		mutex_lock(&wq->wq_lock);
163 
164 		if (idxd->state != IDXD_DEV_ENABLED) {
165 			mutex_unlock(&wq->wq_lock);
166 			dev_warn(dev, "Enabling while device not enabled.\n");
167 			return -EPERM;
168 		}
169 
170 		if (wq->state != IDXD_WQ_DISABLED) {
171 			mutex_unlock(&wq->wq_lock);
172 			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
173 			return -EBUSY;
174 		}
175 
176 		if (!wq->group) {
177 			mutex_unlock(&wq->wq_lock);
178 			dev_warn(dev, "WQ not attached to group.\n");
179 			return -EINVAL;
180 		}
181 
182 		if (strlen(wq->name) == 0) {
183 			mutex_unlock(&wq->wq_lock);
184 			dev_warn(dev, "WQ name not set.\n");
185 			return -EINVAL;
186 		}
187 
188 		/* Shared WQ checks */
189 		if (wq_shared(wq)) {
190 			if (!device_swq_supported(idxd)) {
191 				dev_warn(dev,
192 					 "PASID not enabled and shared WQ.\n");
193 				mutex_unlock(&wq->wq_lock);
194 				return -ENXIO;
195 			}
196 			/*
197 			 * Shared wq with the threshold set to 0 means the user
198 			 * did not set the threshold or transitioned from a
199 			 * dedicated wq but did not set threshold. A value
200 			 * of 0 would effectively disable the shared wq. The
201 			 * driver does not allow a value of 0 to be set for
202 			 * threshold via sysfs.
203 			 */
204 			if (wq->threshold == 0) {
205 				dev_warn(dev,
206 					 "Shared WQ and threshold 0.\n");
207 				mutex_unlock(&wq->wq_lock);
208 				return -EINVAL;
209 			}
210 		}
211 
212 		rc = idxd_wq_alloc_resources(wq);
213 		if (rc < 0) {
214 			mutex_unlock(&wq->wq_lock);
215 			dev_warn(dev, "WQ resource alloc failed\n");
216 			return rc;
217 		}
218 
219 		spin_lock_irqsave(&idxd->dev_lock, flags);
220 		rc = idxd_device_config(idxd);
221 		spin_unlock_irqrestore(&idxd->dev_lock, flags);
222 		if (rc < 0) {
223 			mutex_unlock(&wq->wq_lock);
224 			dev_warn(dev, "Writing WQ %d config failed: %d\n",
225 				 wq->id, rc);
226 			return rc;
227 		}
228 
229 		rc = idxd_wq_enable(wq);
230 		if (rc < 0) {
231 			mutex_unlock(&wq->wq_lock);
232 			dev_warn(dev, "WQ %d enabling failed: %d\n",
233 				 wq->id, rc);
234 			return rc;
235 		}
236 
237 		rc = idxd_wq_map_portal(wq);
238 		if (rc < 0) {
239 			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
240 			rc = idxd_wq_disable(wq);
241 			if (rc < 0)
242 				dev_warn(dev, "IDXD wq disable failed\n");
243 			mutex_unlock(&wq->wq_lock);
244 			return rc;
245 		}
246 
247 		wq->client_count = 0;
248 
249 		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
250 
251 		if (is_idxd_wq_dmaengine(wq)) {
252 			rc = idxd_register_dma_channel(wq);
253 			if (rc < 0) {
254 				dev_dbg(dev, "DMA channel register failed\n");
255 				mutex_unlock(&wq->wq_lock);
256 				return rc;
257 			}
258 		} else if (is_idxd_wq_cdev(wq)) {
259 			rc = idxd_wq_add_cdev(wq);
260 			if (rc < 0) {
261 				dev_dbg(dev, "Cdev creation failed\n");
262 				mutex_unlock(&wq->wq_lock);
263 				return rc;
264 			}
265 		}
266 
267 		mutex_unlock(&wq->wq_lock);
268 		return 0;
269 	}
270 
271 	return -ENODEV;
272 }
273 
/*
 * Tear down an enabled wq: unregister its dmaengine channel or cdev,
 * unmap the portal, drain outstanding descriptors, disable the wq in
 * hardware, and free its resources.  No-op if the wq is already
 * disabled.  Called from the bus remove path with no locks held; takes
 * wq->wq_lock itself.
 */
static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	/* remove the userspace/kernel interface first so no new work arrives */
	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	/* best effort: warn but proceed even with open clients */
	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients has claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	/* flush in-flight descriptors before disabling in hardware */
	idxd_wq_drain(wq);
	rc = idxd_wq_disable(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}
311 
/*
 * Bus remove callback: undo what probe did.  For a wq config device the
 * single wq is disabled.  For a device config device, all still-active
 * wqs are force-unbound first, then the dmaengine registration and the
 * device itself are torn down, and each wq's shadow config is cleared.
 * Always returns 0 (the driver core ignores remove failures).
 */
static int idxd_config_bus_remove(struct device *dev)
{
	int rc;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			/* re-enters this function for the wq via the driver core */
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		rc = idxd_device_disable(idxd);
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			mutex_lock(&wq->wq_lock);
			idxd_wq_disable_cleanup(wq);
			mutex_unlock(&wq->wq_lock);
		}
		/* drop the reference taken in probe */
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));

	}

	return 0;
}
358 
/* Shutdown callback: nothing to do beyond a debug trace. */
static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}
363 
/*
 * The dsa and iax buses carry all configuration devices (device, group,
 * wq, engine) for their respective hardware type; both share the same
 * match/probe/remove/shutdown callbacks above.
 */
struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

struct bus_type iax_bus_type = {
	.name = "iax",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

/* indexed by enum idxd_type (IDXD_TYPE_DSA / IDXD_TYPE_IAX) */
static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type,
	&iax_bus_type
};
384 
/*
 * One configuration driver per bus; these are what the config devices
 * bind to via idxd_config_bus_match()/probe().
 */
static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver iax_drv = {
	.drv = {
		.name = "iax",
		.bus = &iax_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

/* indexed by enum idxd_type, parallel to idxd_bus_types */
static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv,
	&iax_drv
};
407 
/* Return the bus for this device's type; idxd->type indexes idxd_bus_types. */
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}
412 
413 static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
414 {
415 	if (idxd->type == IDXD_TYPE_DSA)
416 		return &dsa_device_type;
417 	else if (idxd->type == IDXD_TYPE_IAX)
418 		return &iax_device_type;
419 	else
420 		return NULL;
421 }
422 
423 /* IDXD generic driver setup */
424 int idxd_register_driver(void)
425 {
426 	int i, rc;
427 
428 	for (i = 0; i < IDXD_TYPE_MAX; i++) {
429 		rc = driver_register(&idxd_drvs[i]->drv);
430 		if (rc < 0)
431 			goto drv_fail;
432 	}
433 
434 	return 0;
435 
436 drv_fail:
437 	while (--i >= 0)
438 		driver_unregister(&idxd_drvs[i]->drv);
439 	return rc;
440 }
441 
442 void idxd_unregister_driver(void)
443 {
444 	int i;
445 
446 	for (i = 0; i < IDXD_TYPE_MAX; i++)
447 		driver_unregister(&idxd_drvs[i]->drv);
448 }
449 
450 /* IDXD engine attributes */
451 static ssize_t engine_group_id_show(struct device *dev,
452 				    struct device_attribute *attr, char *buf)
453 {
454 	struct idxd_engine *engine =
455 		container_of(dev, struct idxd_engine, conf_dev);
456 
457 	if (engine->group)
458 		return sprintf(buf, "%d\n", engine->group->id);
459 	else
460 		return sprintf(buf, "%d\n", -1);
461 }
462 
/*
 * engine sysfs: group_id store.  Attach the engine to group @id, or
 * detach it when -1 is written.  Engine/group bookkeeping (num_engines)
 * is kept consistent across moves.  Only allowed on configurable
 * devices; range-checked against max_groups.
 */
static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	/* -1 detaches the engine from its current group */
	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);
505 
/* sysfs attribute group wiring for engine config devices */
static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};
519 
520 /* Group attributes */
521 
522 static void idxd_set_free_tokens(struct idxd_device *idxd)
523 {
524 	int i, tokens;
525 
526 	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
527 		struct idxd_group *g = &idxd->groups[i];
528 
529 		tokens += g->tokens_reserved;
530 	}
531 
532 	idxd->nr_tokens = idxd->max_tokens - tokens;
533 }
534 
/* group sysfs: tokens_reserved -- bandwidth tokens held exclusively by this group */
static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

/*
 * Reserve @val tokens for this group.  Rejected when the device is an
 * IAX (no token knobs), not configurable, already enabled, or when the
 * request exceeds either the device maximum or the currently free
 * tokens (counting those this group already holds).  The free-token
 * count is recomputed on success.
 */
static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);
582 
/* group sysfs: tokens_allowed -- upper bound of tokens this group may use */
static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

/*
 * Set the token cap for this group.  Must be at least 4 per attached
 * engine and no more than the group's reserved tokens plus the free
 * pool.  Same IAX/configurable/enabled gating as tokens_reserved.
 */
static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);
627 
/* group sysfs: use_token_limit -- whether the group honors the device token limit */
static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

/*
 * Toggle use of the device-wide token limit.  Any nonzero input enables
 * it.  Requires a configurable, not-yet-enabled device with a nonzero
 * token limit configured; IAX devices are rejected.
 */
static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);
671 
672 static ssize_t group_engines_show(struct device *dev,
673 				  struct device_attribute *attr, char *buf)
674 {
675 	struct idxd_group *group =
676 		container_of(dev, struct idxd_group, conf_dev);
677 	int i, rc = 0;
678 	char *tmp = buf;
679 	struct idxd_device *idxd = group->idxd;
680 
681 	for (i = 0; i < idxd->max_engines; i++) {
682 		struct idxd_engine *engine = &idxd->engines[i];
683 
684 		if (!engine->group)
685 			continue;
686 
687 		if (engine->group->id == group->id)
688 			rc += sprintf(tmp + rc, "engine%d.%d ",
689 					idxd->id, engine->id);
690 	}
691 
692 	rc--;
693 	rc += sprintf(tmp + rc, "\n");
694 
695 	return rc;
696 }
697 
698 static struct device_attribute dev_attr_group_engines =
699 		__ATTR(engines, 0444, group_engines_show, NULL);
700 
701 static ssize_t group_work_queues_show(struct device *dev,
702 				      struct device_attribute *attr, char *buf)
703 {
704 	struct idxd_group *group =
705 		container_of(dev, struct idxd_group, conf_dev);
706 	int i, rc = 0;
707 	char *tmp = buf;
708 	struct idxd_device *idxd = group->idxd;
709 
710 	for (i = 0; i < idxd->max_wqs; i++) {
711 		struct idxd_wq *wq = &idxd->wqs[i];
712 
713 		if (!wq->group)
714 			continue;
715 
716 		if (wq->group->id == group->id)
717 			rc += sprintf(tmp + rc, "wq%d.%d ",
718 					idxd->id, wq->id);
719 	}
720 
721 	rc--;
722 	rc += sprintf(tmp + rc, "\n");
723 
724 	return rc;
725 }
726 
727 static struct device_attribute dev_attr_group_work_queues =
728 		__ATTR(work_queues, 0444, group_work_queues_show, NULL);
729 
/* group sysfs: traffic_class_a -- TC-A value programmed for this group (0-7) */
static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

/*
 * Set traffic class A.  Requires a configurable, not-yet-enabled
 * device; value must be within 0-7.
 */
static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);
770 
/* group sysfs: traffic_class_b -- TC-B value programmed for this group (0-7) */
static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

/*
 * Set traffic class B.  Same gating and 0-7 range as traffic_class_a.
 */
static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);
811 
/* sysfs attribute group wiring for group config devices */
static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};
831 
832 /* IDXD work queue attribs */
/* wq sysfs: clients -- current number of users holding the wq open */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);
843 
/* wq sysfs: state -- "disabled", "enabled", or "unknown" */
static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	/* fall-through for any state value not covered above */
	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);
861 
/* wq sysfs: group_id -- attached group's id, or -1 when detached */
static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

/*
 * Attach the wq to group @id, or detach on -1, keeping the group's
 * num_wqs count consistent.  Only allowed on a configurable device
 * while the wq is disabled; @id is range-checked against max_groups.
 */
static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	/* -1 detaches the wq from its current group */
	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
916 
/* wq sysfs: mode -- "dedicated" or "shared" */
static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
			wq_dedicated(wq) ? "dedicated" : "shared");
}

/*
 * Switch the wq between dedicated and shared mode.  "shared" is only
 * accepted when the device supports shared wqs (PASID enabled).
 * Switching to dedicated resets the threshold, since it only applies to
 * shared wqs.  Requires a configurable device and a disabled wq.
 */
static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);
953 
/* wq sysfs: size -- number of wq entries configured for this wq */
static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}
961 
962 static int total_claimed_wq_size(struct idxd_device *idxd)
963 {
964 	int i;
965 	int wq_size = 0;
966 
967 	for (i = 0; i < idxd->max_wqs; i++) {
968 		struct idxd_wq *wq = &idxd->wqs[i];
969 
970 		wq_size += wq->size;
971 	}
972 
973 	return wq_size;
974 }
975 
/*
 * wq sysfs: size store.  Resize the wq, ensuring the total claimed by
 * all wqs (replacing this wq's current contribution) does not exceed
 * the device maximum.  Requires a configurable device and a disabled
 * wq.
 */
static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	/* subtract wq->size: this wq's old size is being replaced */
	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);
1004 
/* wq sysfs: priority -- wq scheduling priority (0..IDXD_MAX_PRIORITY) */
static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

/*
 * Set the wq priority.  Requires a configurable device and a disabled
 * wq; value capped at IDXD_MAX_PRIORITY.
 */
static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);
1041 
/* wq sysfs: block_on_fault -- whether the wq blocks on page faults (0/1) */
static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

/*
 * Toggle the block-on-fault flag.  Accepts any kstrtobool-compatible
 * input.  Requires a configurable device and a disabled wq.
 */
static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);
1081 
1082 static ssize_t wq_threshold_show(struct device *dev,
1083 				 struct device_attribute *attr, char *buf)
1084 {
1085 	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1086 
1087 	return sprintf(buf, "%u\n", wq->threshold);
1088 }
1089 
1090 static ssize_t wq_threshold_store(struct device *dev,
1091 				  struct device_attribute *attr,
1092 				  const char *buf, size_t count)
1093 {
1094 	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1095 	struct idxd_device *idxd = wq->idxd;
1096 	unsigned int val;
1097 	int rc;
1098 
1099 	rc = kstrtouint(buf, 0, &val);
1100 	if (rc < 0)
1101 		return -EINVAL;
1102 
1103 	if (val > wq->size || val <= 0)
1104 		return -EINVAL;
1105 
1106 	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1107 		return -EPERM;
1108 
1109 	if (wq->state != IDXD_WQ_DISABLED)
1110 		return -ENXIO;
1111 
1112 	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
1113 		return -EINVAL;
1114 
1115 	wq->threshold = val;
1116 
1117 	return count;
1118 }
1119 
1120 static struct device_attribute dev_attr_wq_threshold =
1121 		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
1122 
1123 static ssize_t wq_type_show(struct device *dev,
1124 			    struct device_attribute *attr, char *buf)
1125 {
1126 	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1127 
1128 	switch (wq->type) {
1129 	case IDXD_WQT_KERNEL:
1130 		return sprintf(buf, "%s\n",
1131 			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
1132 	case IDXD_WQT_USER:
1133 		return sprintf(buf, "%s\n",
1134 			       idxd_wq_type_names[IDXD_WQT_USER]);
1135 	case IDXD_WQT_NONE:
1136 	default:
1137 		return sprintf(buf, "%s\n",
1138 			       idxd_wq_type_names[IDXD_WQT_NONE]);
1139 	}
1140 
1141 	return -EINVAL;
1142 }
1143 
/*
 * wq sysfs: type store.  Accepts "none", "kernel", or "user".  Changing
 * the type clears the wq name, since the name's meaning depends on the
 * type (e.g. "dmaengine" for kernel wqs).  Only allowed while the wq is
 * disabled.
 */
static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);
1173 
1174 static ssize_t wq_name_show(struct device *dev,
1175 			    struct device_attribute *attr, char *buf)
1176 {
1177 	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1178 
1179 	return sprintf(buf, "%s\n", wq->name);
1180 }
1181 
1182 static ssize_t wq_name_store(struct device *dev,
1183 			     struct device_attribute *attr, const char *buf,
1184 			     size_t count)
1185 {
1186 	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1187 
1188 	if (wq->state != IDXD_WQ_DISABLED)
1189 		return -EPERM;
1190 
1191 	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
1192 		return -EINVAL;
1193 
1194 	/*
1195 	 * This is temporarily placed here until we have SVM support for
1196 	 * dmaengine.
1197 	 */
1198 	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
1199 		return -EOPNOTSUPP;
1200 
1201 	memset(wq->name, 0, WQ_NAME_SIZE + 1);
1202 	strncpy(wq->name, buf, WQ_NAME_SIZE);
1203 	strreplace(wq->name, '\n', '\0');
1204 	return count;
1205 }
1206 
/* wq "name" attribute (rw): user-assigned wq identifier. */
static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);
1209 
1210 static ssize_t wq_cdev_minor_show(struct device *dev,
1211 				  struct device_attribute *attr, char *buf)
1212 {
1213 	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1214 
1215 	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
1216 }
1217 
/* wq "cdev_minor" attribute (ro). */
static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
1220 
1221 static int __get_sysfs_u64(const char *buf, u64 *val)
1222 {
1223 	int rc;
1224 
1225 	rc = kstrtou64(buf, 0, val);
1226 	if (rc < 0)
1227 		return -EINVAL;
1228 
1229 	if (*val == 0)
1230 		return -EINVAL;
1231 
1232 	*val = roundup_pow_of_two(*val);
1233 	return 0;
1234 }
1235 
1236 static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
1237 					 char *buf)
1238 {
1239 	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1240 
1241 	return sprintf(buf, "%llu\n", wq->max_xfer_bytes);
1242 }
1243 
1244 static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
1245 					  const char *buf, size_t count)
1246 {
1247 	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1248 	struct idxd_device *idxd = wq->idxd;
1249 	u64 xfer_size;
1250 	int rc;
1251 
1252 	if (wq->state != IDXD_WQ_DISABLED)
1253 		return -EPERM;
1254 
1255 	rc = __get_sysfs_u64(buf, &xfer_size);
1256 	if (rc < 0)
1257 		return rc;
1258 
1259 	if (xfer_size > idxd->max_xfer_bytes)
1260 		return -EINVAL;
1261 
1262 	wq->max_xfer_bytes = xfer_size;
1263 
1264 	return count;
1265 }
1266 
/* wq "max_transfer_size" attribute (rw). */
static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);
1270 
1271 static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
1272 {
1273 	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1274 
1275 	return sprintf(buf, "%u\n", wq->max_batch_size);
1276 }
1277 
1278 static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
1279 				       const char *buf, size_t count)
1280 {
1281 	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1282 	struct idxd_device *idxd = wq->idxd;
1283 	u64 batch_size;
1284 	int rc;
1285 
1286 	if (wq->state != IDXD_WQ_DISABLED)
1287 		return -EPERM;
1288 
1289 	rc = __get_sysfs_u64(buf, &batch_size);
1290 	if (rc < 0)
1291 		return rc;
1292 
1293 	if (batch_size > idxd->max_batch_size)
1294 		return -EINVAL;
1295 
1296 	wq->max_batch_size = (u32)batch_size;
1297 
1298 	return count;
1299 }
1300 
/* wq "max_batch_size" attribute (rw). */
static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);
1303 
1304 static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
1305 {
1306 	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1307 
1308 	return sprintf(buf, "%u\n", wq->ats_dis);
1309 }
1310 
1311 static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
1312 				    const char *buf, size_t count)
1313 {
1314 	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1315 	struct idxd_device *idxd = wq->idxd;
1316 	bool ats_dis;
1317 	int rc;
1318 
1319 	if (wq->state != IDXD_WQ_DISABLED)
1320 		return -EPERM;
1321 
1322 	if (!idxd->hw.wq_cap.wq_ats_support)
1323 		return -EOPNOTSUPP;
1324 
1325 	rc = kstrtobool(buf, &ats_dis);
1326 	if (rc < 0)
1327 		return rc;
1328 
1329 	wq->ats_dis = ats_dis;
1330 
1331 	return count;
1332 }
1333 
/* wq "ats_disable" attribute (rw). */
static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);
1336 
/* All sysfs attributes exposed under each wq conf device. */
static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	NULL,
};
1354 
/* Attribute group wrapping idxd_wq_attributes for the driver core. */
static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

/* NULL-terminated group list assigned to wq conf_dev.groups. */
static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};
1363 
1364 /* IDXD device attribs */
1365 static ssize_t version_show(struct device *dev, struct device_attribute *attr,
1366 			    char *buf)
1367 {
1368 	struct idxd_device *idxd =
1369 		container_of(dev, struct idxd_device, conf_dev);
1370 
1371 	return sprintf(buf, "%#x\n", idxd->hw.version);
1372 }
1373 static DEVICE_ATTR_RO(version);
1374 
1375 static ssize_t max_work_queues_size_show(struct device *dev,
1376 					 struct device_attribute *attr,
1377 					 char *buf)
1378 {
1379 	struct idxd_device *idxd =
1380 		container_of(dev, struct idxd_device, conf_dev);
1381 
1382 	return sprintf(buf, "%u\n", idxd->max_wq_size);
1383 }
1384 static DEVICE_ATTR_RO(max_work_queues_size);
1385 
1386 static ssize_t max_groups_show(struct device *dev,
1387 			       struct device_attribute *attr, char *buf)
1388 {
1389 	struct idxd_device *idxd =
1390 		container_of(dev, struct idxd_device, conf_dev);
1391 
1392 	return sprintf(buf, "%u\n", idxd->max_groups);
1393 }
1394 static DEVICE_ATTR_RO(max_groups);
1395 
1396 static ssize_t max_work_queues_show(struct device *dev,
1397 				    struct device_attribute *attr, char *buf)
1398 {
1399 	struct idxd_device *idxd =
1400 		container_of(dev, struct idxd_device, conf_dev);
1401 
1402 	return sprintf(buf, "%u\n", idxd->max_wqs);
1403 }
1404 static DEVICE_ATTR_RO(max_work_queues);
1405 
1406 static ssize_t max_engines_show(struct device *dev,
1407 				struct device_attribute *attr, char *buf)
1408 {
1409 	struct idxd_device *idxd =
1410 		container_of(dev, struct idxd_device, conf_dev);
1411 
1412 	return sprintf(buf, "%u\n", idxd->max_engines);
1413 }
1414 static DEVICE_ATTR_RO(max_engines);
1415 
1416 static ssize_t numa_node_show(struct device *dev,
1417 			      struct device_attribute *attr, char *buf)
1418 {
1419 	struct idxd_device *idxd =
1420 		container_of(dev, struct idxd_device, conf_dev);
1421 
1422 	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
1423 }
1424 static DEVICE_ATTR_RO(numa_node);
1425 
1426 static ssize_t max_batch_size_show(struct device *dev,
1427 				   struct device_attribute *attr, char *buf)
1428 {
1429 	struct idxd_device *idxd =
1430 		container_of(dev, struct idxd_device, conf_dev);
1431 
1432 	return sprintf(buf, "%u\n", idxd->max_batch_size);
1433 }
1434 static DEVICE_ATTR_RO(max_batch_size);
1435 
1436 static ssize_t max_transfer_size_show(struct device *dev,
1437 				      struct device_attribute *attr,
1438 				      char *buf)
1439 {
1440 	struct idxd_device *idxd =
1441 		container_of(dev, struct idxd_device, conf_dev);
1442 
1443 	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
1444 }
1445 static DEVICE_ATTR_RO(max_transfer_size);
1446 
1447 static ssize_t op_cap_show(struct device *dev,
1448 			   struct device_attribute *attr, char *buf)
1449 {
1450 	struct idxd_device *idxd =
1451 		container_of(dev, struct idxd_device, conf_dev);
1452 
1453 	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
1454 }
1455 static DEVICE_ATTR_RO(op_cap);
1456 
1457 static ssize_t gen_cap_show(struct device *dev,
1458 			    struct device_attribute *attr, char *buf)
1459 {
1460 	struct idxd_device *idxd =
1461 		container_of(dev, struct idxd_device, conf_dev);
1462 
1463 	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
1464 }
1465 static DEVICE_ATTR_RO(gen_cap);
1466 
1467 static ssize_t configurable_show(struct device *dev,
1468 				 struct device_attribute *attr, char *buf)
1469 {
1470 	struct idxd_device *idxd =
1471 		container_of(dev, struct idxd_device, conf_dev);
1472 
1473 	return sprintf(buf, "%u\n",
1474 			test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
1475 }
1476 static DEVICE_ATTR_RO(configurable);
1477 
1478 static ssize_t clients_show(struct device *dev,
1479 			    struct device_attribute *attr, char *buf)
1480 {
1481 	struct idxd_device *idxd =
1482 		container_of(dev, struct idxd_device, conf_dev);
1483 	unsigned long flags;
1484 	int count = 0, i;
1485 
1486 	spin_lock_irqsave(&idxd->dev_lock, flags);
1487 	for (i = 0; i < idxd->max_wqs; i++) {
1488 		struct idxd_wq *wq = &idxd->wqs[i];
1489 
1490 		count += wq->client_count;
1491 	}
1492 	spin_unlock_irqrestore(&idxd->dev_lock, flags);
1493 
1494 	return sprintf(buf, "%d\n", count);
1495 }
1496 static DEVICE_ATTR_RO(clients);
1497 
1498 static ssize_t pasid_enabled_show(struct device *dev,
1499 				  struct device_attribute *attr, char *buf)
1500 {
1501 	struct idxd_device *idxd =
1502 		container_of(dev, struct idxd_device, conf_dev);
1503 
1504 	return sprintf(buf, "%u\n", device_pasid_enabled(idxd));
1505 }
1506 static DEVICE_ATTR_RO(pasid_enabled);
1507 
1508 static ssize_t state_show(struct device *dev,
1509 			  struct device_attribute *attr, char *buf)
1510 {
1511 	struct idxd_device *idxd =
1512 		container_of(dev, struct idxd_device, conf_dev);
1513 
1514 	switch (idxd->state) {
1515 	case IDXD_DEV_DISABLED:
1516 	case IDXD_DEV_CONF_READY:
1517 		return sprintf(buf, "disabled\n");
1518 	case IDXD_DEV_ENABLED:
1519 		return sprintf(buf, "enabled\n");
1520 	case IDXD_DEV_HALTED:
1521 		return sprintf(buf, "halted\n");
1522 	}
1523 
1524 	return sprintf(buf, "unknown\n");
1525 }
1526 static DEVICE_ATTR_RO(state);
1527 
1528 static ssize_t errors_show(struct device *dev,
1529 			   struct device_attribute *attr, char *buf)
1530 {
1531 	struct idxd_device *idxd =
1532 		container_of(dev, struct idxd_device, conf_dev);
1533 	int i, out = 0;
1534 	unsigned long flags;
1535 
1536 	spin_lock_irqsave(&idxd->dev_lock, flags);
1537 	for (i = 0; i < 4; i++)
1538 		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
1539 	spin_unlock_irqrestore(&idxd->dev_lock, flags);
1540 	out--;
1541 	out += sprintf(buf + out, "\n");
1542 	return out;
1543 }
1544 static DEVICE_ATTR_RO(errors);
1545 
1546 static ssize_t max_tokens_show(struct device *dev,
1547 			       struct device_attribute *attr, char *buf)
1548 {
1549 	struct idxd_device *idxd =
1550 		container_of(dev, struct idxd_device, conf_dev);
1551 
1552 	return sprintf(buf, "%u\n", idxd->max_tokens);
1553 }
1554 static DEVICE_ATTR_RO(max_tokens);
1555 
1556 static ssize_t token_limit_show(struct device *dev,
1557 				struct device_attribute *attr, char *buf)
1558 {
1559 	struct idxd_device *idxd =
1560 		container_of(dev, struct idxd_device, conf_dev);
1561 
1562 	return sprintf(buf, "%u\n", idxd->token_limit);
1563 }
1564 
1565 static ssize_t token_limit_store(struct device *dev,
1566 				 struct device_attribute *attr,
1567 				 const char *buf, size_t count)
1568 {
1569 	struct idxd_device *idxd =
1570 		container_of(dev, struct idxd_device, conf_dev);
1571 	unsigned long val;
1572 	int rc;
1573 
1574 	rc = kstrtoul(buf, 10, &val);
1575 	if (rc < 0)
1576 		return -EINVAL;
1577 
1578 	if (idxd->state == IDXD_DEV_ENABLED)
1579 		return -EPERM;
1580 
1581 	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1582 		return -EPERM;
1583 
1584 	if (!idxd->hw.group_cap.token_limit)
1585 		return -EPERM;
1586 
1587 	if (val > idxd->hw.group_cap.total_tokens)
1588 		return -EINVAL;
1589 
1590 	idxd->token_limit = val;
1591 	return count;
1592 }
1593 static DEVICE_ATTR_RW(token_limit);
1594 
1595 static ssize_t cdev_major_show(struct device *dev,
1596 			       struct device_attribute *attr, char *buf)
1597 {
1598 	struct idxd_device *idxd =
1599 		container_of(dev, struct idxd_device, conf_dev);
1600 
1601 	return sprintf(buf, "%u\n", idxd->major);
1602 }
1603 static DEVICE_ATTR_RO(cdev_major);
1604 
1605 static ssize_t cmd_status_show(struct device *dev,
1606 			       struct device_attribute *attr, char *buf)
1607 {
1608 	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
1609 
1610 	return sprintf(buf, "%#x\n", idxd->cmd_status);
1611 }
1612 static DEVICE_ATTR_RO(cmd_status);
1613 
/* All sysfs attributes exposed under the idxd device conf device. */
static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};
1636 
/* Attribute group wrapping idxd_device_attributes for the driver core. */
static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

/* NULL-terminated group list assigned to the idxd conf_dev.groups. */
static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};
1645 
/*
 * Register a sysfs conf device for every engine on the device.
 *
 * On a device_register() failure, put_device() drops the reference taken
 * by device_register() on the failed engine, then all engines registered
 * earlier in this loop are unregistered before returning the error.
 */
static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	/* Unwind engines [0, i) that registered successfully. */
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}
1679 
/*
 * Register a sysfs conf device for every group on the device.
 *
 * On a device_register() failure, put_device() drops the reference taken
 * by device_register() on the failed group, then all groups registered
 * earlier in this loop are unregistered before returning the error.
 */
static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	/* Unwind groups [0, i) that registered successfully. */
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}
1713 
/*
 * Register a sysfs conf device for every wq on the device.
 *
 * On a device_register() failure, put_device() drops the reference taken
 * by device_register() on the failed wq, then all wqs registered earlier
 * in this loop are unregistered before returning the error.
 */
static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	/* Unwind wqs [0, i) that registered successfully. */
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}
1746 
1747 static int idxd_setup_device_sysfs(struct idxd_device *idxd)
1748 {
1749 	struct device *dev = &idxd->pdev->dev;
1750 	int rc;
1751 	char devname[IDXD_NAME_SIZE];
1752 
1753 	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
1754 	idxd->conf_dev.parent = dev;
1755 	dev_set_name(&idxd->conf_dev, "%s", devname);
1756 	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
1757 	idxd->conf_dev.groups = idxd_attribute_groups;
1758 	idxd->conf_dev.type = idxd_get_device_type(idxd);
1759 
1760 	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
1761 	rc = device_register(&idxd->conf_dev);
1762 	if (rc < 0) {
1763 		put_device(&idxd->conf_dev);
1764 		return rc;
1765 	}
1766 
1767 	return 0;
1768 }
1769 
1770 int idxd_setup_sysfs(struct idxd_device *idxd)
1771 {
1772 	struct device *dev = &idxd->pdev->dev;
1773 	int rc;
1774 
1775 	rc = idxd_setup_device_sysfs(idxd);
1776 	if (rc < 0) {
1777 		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
1778 		return rc;
1779 	}
1780 
1781 	rc = idxd_setup_wq_sysfs(idxd);
1782 	if (rc < 0) {
1783 		/* unregister conf dev */
1784 		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
1785 		return rc;
1786 	}
1787 
1788 	rc = idxd_setup_group_sysfs(idxd);
1789 	if (rc < 0) {
1790 		/* unregister conf dev */
1791 		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
1792 		return rc;
1793 	}
1794 
1795 	rc = idxd_setup_engine_sysfs(idxd);
1796 	if (rc < 0) {
1797 		/* unregister conf dev */
1798 		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
1799 		return rc;
1800 	}
1801 
1802 	return 0;
1803 }
1804 
/*
 * Tear down the entire sysfs hierarchy built by idxd_setup_sysfs():
 * unregister every wq, engine and group conf device, then the top-level
 * idxd conf device last (children before parent).
 */
void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}
1829 
1830 int idxd_register_bus_type(void)
1831 {
1832 	int i, rc;
1833 
1834 	for (i = 0; i < IDXD_TYPE_MAX; i++) {
1835 		rc = bus_register(idxd_bus_types[i]);
1836 		if (rc < 0)
1837 			goto bus_err;
1838 	}
1839 
1840 	return 0;
1841 
1842 bus_err:
1843 	while (--i >= 0)
1844 		bus_unregister(idxd_bus_types[i]);
1845 	return rc;
1846 }
1847 
1848 void idxd_unregister_bus_type(void)
1849 {
1850 	int i;
1851 
1852 	for (i = 0; i < IDXD_TYPE_MAX; i++)
1853 		bus_unregister(idxd_bus_types[i]);
1854 }
1855