// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

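/*
 * Helpers to tell apart the conf_dev flavors registered on the dsa bus;
 * the device_type pointer set at registration time identifies each one.
 */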
static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

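/*
 * Driver match callback for the dsa bus. A device may only bind once it
 * is ready: the idxd device itself must have reached IDXD_DEV_CONF_READY,
 * and a work queue must additionally be disabled before it can be
 * (re)enabled through a probe.
 */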
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}

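/*
 * Probe callback for the dsa bus. For an idxd device this writes the
 * group/wq/engine configuration to hardware and enables the device; for
 * a work queue it validates the sysfs-supplied configuration, allocates
 * descriptor resources, programs and enables the wq, maps its portal,
 * and finally registers either a dmaengine channel or a char device,
 * depending on the wq type. idxd->dev_lock serializes the register
 * writes.
 */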
static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		spin_lock_irqsave(&idxd->dev_lock, flags);

		/* Perform IDXD configuration and enabling */
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			/* dev_lock was already released above */
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}
		spin_unlock_irqrestore(&idxd->dev_lock, flags);

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			/* keep the mapping error; dev_lock already dropped */
			if (idxd_wq_disable(wq) < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}

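/*
 * Tear a work queue down in the reverse order of probe: unregister the
 * dmaengine channel or cdev first so no new submitters can attach, warn
 * if clients still hold the wq, then unmap the portal and disable the
 * wq in hardware before releasing its resources.
 */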
static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	unsigned long flags;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = idxd_wq_disable(wq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

static int idxd_config_bus_remove(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* a wq is disabled on its own; a device takes down its wqs first */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d found while disabling %s.\n",
				 i, dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_disable(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));

	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

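/*
 * Sketch of how userspace drives this bus, assuming a hypothetical
 * first device dsa0 with work queue wq0.0 (the paths follow from the
 * bus, device, and driver names registered in this file):
 *
 *	echo 0 > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *	echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *	echo 16 > /sys/bus/dsa/devices/dsa0/wq0.0/size
 *	echo user > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *	echo app0 > /sys/bus/dsa/devices/dsa0/wq0.0/name
 *	echo dsa0 > /sys/bus/dsa/drivers/dsa/bind
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
 *
 * The bind writes trigger idxd_config_bus_probe() above.
 */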
static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv
};

struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	/* unwind only the drivers that actually registered */
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = group;
	group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* Group attributes */

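/*
 * "Tokens" are the shared bandwidth credits a DSA device hands out to
 * groups (total_tokens in GRPCAP). Whatever is not reserved by a group
 * remains in the free pool, so nr_tokens is max_tokens minus the sum of
 * all per-group reservations.
 */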
static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;
	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
					idxd->id, engine->id);
	}

	/* drop the trailing space, but only if something was printed */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
					idxd->id, wq->id);
	}

	/* drop the trailing space, but only if something was printed */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
			wq_dedicated(wq) ? "dedicated" : "shared");
}

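/*
 * Only "dedicated" is accepted below: shared wq mode needs ENQCMD/PASID
 * support, which this revision of the driver does not wire up yet.
 */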
static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		wq->type = IDXD_WQT_NONE;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

/* IDXD device attribs */
static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
			test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

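/*
 * Each engine/group/wq gets its own conf_dev, parented to the idxd
 * conf_dev so the sysfs hierarchy mirrors the hardware layout. Per the
 * driver-model rules, a failed device_register() still requires
 * put_device() on that device, while already-registered siblings are
 * unwound with device_unregister().
 */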
static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

1389 {
1390 	struct device *dev = &idxd->pdev->dev;
1391 	int i, rc;
1392 
1393 	for (i = 0; i < idxd->max_wqs; i++) {
1394 		struct idxd_wq *wq = &idxd->wqs[i];
1395 
1396 		wq->conf_dev.parent = &idxd->conf_dev;
1397 		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
1398 		wq->conf_dev.bus = idxd_get_bus_type(idxd);
1399 		wq->conf_dev.groups = idxd_wq_attribute_groups;
1400 		wq->conf_dev.type = &idxd_wq_device_type;
1401 		dev_dbg(dev, "WQ device register: %s\n",
1402 			dev_name(&wq->conf_dev));
1403 		rc = device_register(&wq->conf_dev);
1404 		if (rc < 0) {
1405 			put_device(&wq->conf_dev);
1406 			goto cleanup;
1407 		}
1408 	}
1409 
1410 	return 0;
1411 
1412 cleanup:
1413 	while (i--) {
1414 		struct idxd_wq *wq = &idxd->wqs[i];
1415 
1416 		device_unregister(&wq->conf_dev);
1417 	}
1418 	return rc;
1419 }
1420 
static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}

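/*
 * Register the sysfs hierarchy top-down from the device node: the idxd
 * conf_dev must exist before the wq/group/engine conf_devs can be
 * parented to it.
 */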
int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* TODO: unwind the already-registered device conf_dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* TODO: unwind the already-registered conf_devs */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* TODO: unwind the already-registered conf_devs */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}

void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

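/*
 * Bus types are registered once at module load, one entry per supported
 * idxd device type (only the dsa bus so far), and torn down in reverse
 * at module exit.
 */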
int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	/* unwind only the bus types that actually registered */
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}