// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}

static int enable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	unsigned long flags;
	int rc;

	mutex_lock(&wq->wq_lock);

	if (idxd->state != IDXD_DEV_ENABLED) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "Enabling wq while device is not enabled.\n");
		return -EPERM;
	}

	if (wq->state != IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ %d already enabled.\n", wq->id);
		return -EBUSY;
	}

	if (!wq->group) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ not attached to group.\n");
		return -EINVAL;
	}

	if (strlen(wq->name) == 0) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ name not set.\n");
		return -EINVAL;
	}

	/* Shared WQ checks */
	if (wq_shared(wq)) {
		if (!device_swq_supported(idxd)) {
			dev_warn(dev, "Shared WQ requested but PASID not enabled.\n");
			mutex_unlock(&wq->wq_lock);
			return -ENXIO;
		}
		/*
		 * A shared wq with a threshold of 0 means the user either did
		 * not set a threshold or transitioned from a dedicated wq
		 * without setting one. A threshold of 0 would effectively
		 * disable the shared wq, so the driver does not allow 0 to
		 * be set via sysfs.
		 */
		if (wq->threshold == 0) {
			dev_warn(dev, "Shared WQ requires a non-zero threshold.\n");
			mutex_unlock(&wq->wq_lock);
			return -EINVAL;
		}
	}

	rc = idxd_wq_alloc_resources(wq);
	if (rc < 0) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ resource alloc failed\n");
		return rc;
	}

	spin_lock_irqsave(&idxd->dev_lock, flags);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	if (rc < 0) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "Writing WQ %d config failed: %d\n", wq->id, rc);
		return rc;
	}

	rc = idxd_wq_enable(wq);
	if (rc < 0) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ %d enabling failed: %d\n", wq->id, rc);
		return rc;
	}

	rc = idxd_wq_map_portal(wq);
	if (rc < 0) {
		dev_warn(dev, "wq portal mapping failed: %d\n", rc);
		rc = idxd_wq_disable(wq);
		if (rc < 0)
			dev_warn(dev, "IDXD wq disable failed\n");
		mutex_unlock(&wq->wq_lock);
		return rc;
	}

	wq->client_count = 0;

	if (wq->type == IDXD_WQT_KERNEL) {
		rc = idxd_wq_init_percpu_ref(wq);
		if (rc < 0) {
			dev_dbg(dev, "percpu_ref setup failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}
	}

	if (is_idxd_wq_dmaengine(wq)) {
		rc = idxd_register_dma_channel(wq);
		if (rc < 0) {
			dev_dbg(dev, "DMA channel register failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}
	} else if (is_idxd_wq_cdev(wq)) {
		rc = idxd_wq_add_cdev(wq);
		if (rc < 0) {
			dev_dbg(dev, "Cdev creation failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}
	}

	mutex_unlock(&wq->wq_lock);
	dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

	return 0;
}

static int idxd_config_bus_probe(struct device *dev)
{
	int rc = 0;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		/* Perform IDXD configuration and enabling */
		spin_lock_irqsave(&idxd->dev_lock, flags);
		if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
			rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		return enable_wq(wq);
	}

	return -ENODEV;
}

static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (wq->type == IDXD_WQT_KERNEL)
		idxd_wq_quiesce(wq);

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	idxd_wq_reset(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

static void idxd_config_bus_remove(struct device *dev)
{
	int rc;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable the work queue */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d while disabling %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		rc = idxd_device_disable(idxd);
		if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
			for (i = 0; i < idxd->max_wqs; i++) {
				struct idxd_wq *wq = idxd->wqs[i];

				mutex_lock(&wq->wq_lock);
				idxd_wq_disable_cleanup(wq);
				mutex_unlock(&wq->wq_lock);
			}
		}
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));

	}
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};
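
/*
 * Illustrative usage, not part of the driver logic: once this bus is
 * registered, userspace enables a configured device or wq by binding it to
 * the "dsa" driver below, which lands in idxd_config_bus_probe(). The
 * device and wq names here are examples and vary by platform:
 *
 *	echo dsa0  > /sys/bus/dsa/drivers/dsa/bind	(enable device)
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind	(enable wq)
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/unbind	(disable wq)
 */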

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	return driver_register(&dsa_drv.drv);
}

void idxd_unregister_driver(void)
{
	driver_unregister(&dsa_drv.drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sysfs_emit(buf, "%d\n", engine->group->id);
	else
		return sysfs_emit(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);

	kfree(engine);
}

struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};

/* Group attributes */

static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}
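
/*
 * Worked example with illustrative numbers: if max_tokens is 96 and three
 * groups reserve 10, 20 and 0 tokens respectively, then after this runs
 * nr_tokens = 96 - (10 + 20 + 0) = 66 tokens remain for general use.
 */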

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sysfs_emit(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sysfs_emit(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sysfs_emit(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sysfs_emit(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sysfs_emit(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);

	kfree(group);
}

struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};

/* IDXD work queue attributes */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	}

	return sysfs_emit(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sysfs_emit(buf, "%u\n", wq->group->id);
	else
		return sysfs_emit(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val == 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
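
/*
 * Illustrative sysfs sequence (paths and values are examples): a threshold
 * only applies to a shared wq and must be non-zero before the wq can be
 * enabled, per the checks in wq_threshold_store() and enable_wq():
 *
 *	echo shared > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *	echo 10 > /sys/bus/dsa/devices/dsa0/wq0.0/threshold
 */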

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);
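
/*
 * Illustrative end-to-end wq setup using the attributes above (device
 * names and values are examples only):
 *
 *	echo 0 > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *	echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *	echo 16 > /sys/bus/dsa/devices/dsa0/wq0.0/size
 *	echo 10 > /sys/bus/dsa/devices/dsa0/wq0.0/priority
 *	echo user > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *	echo myapp > /sys/bus/dsa/devices/dsa0/wq0.0/name
 */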

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}
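
/*
 * Example: kstrtou64() with base 0 accepts decimal or 0x-prefixed hex, so
 * "1000" and "0x3e8" both parse to 1000, which roundup_pow_of_two() then
 * rounds to 1024. A value of 0 is rejected rather than rounded.
 */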

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", wq->ats_dis);
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	wq->ats_dis = ats_dis;

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	kfree(wq->wqcfg);
	kfree(wq);
}

struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};

/* IDXD device attributes */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, rc = 0;

	for (i = 0; i < 4; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");
	return rc;
}
static DEVICE_ATTR_RO(op_cap);
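
/*
 * Example op_cap output (illustrative register value): four 64-bit
 * operation capability words separated by spaces, e.g.
 *
 *	$ cat /sys/bus/dsa/devices/dsa0/op_cap
 *	0x40915f010f 0 0 0
 *
 * Note that %#llx prints a bare "0" for zero-valued words.
 */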

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sysfs_emit(buf, "halted\n");
	}

	return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sysfs_emit_at(buf, out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);

	kfree(idxd->groups);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	kfree(idxd->irq_entries);
	kfree(idxd->int_handles);
	ida_free(&idxd_ida, idxd->id);
	kfree(idxd);
}

struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

static int idxd_register_engine_devices(struct idxd_device *idxd)
{
	int i, j, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		rc = device_add(&engine->conf_dev);
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	/* drop refs on the failed and unadded entries, unregister added ones */
	j = i;
	for (; i < idxd->max_engines; i++)
		put_device(&idxd->engines[i]->conf_dev);

	while (j--)
		device_unregister(&idxd->engines[j]->conf_dev);
	return rc;
}
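
/*
 * Note on the cleanup idiom above (and in the group/wq variants below):
 * entries whose device_add() never ran, or failed, only need their
 * reference dropped with put_device(), while entries that were added
 * successfully must be torn down with device_unregister(). Starting the
 * unregister index at the count of added devices (j = i) covers exactly
 * the added ones, including when the very first device_add() fails.
 */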

static int idxd_register_group_devices(struct idxd_device *idxd)
{
	int i, j, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		rc = device_add(&group->conf_dev);
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i;
	for (; i < idxd->max_groups; i++)
		put_device(&idxd->groups[i]->conf_dev);

	while (j--)
		device_unregister(&idxd->groups[j]->conf_dev);
	return rc;
}

static int idxd_register_wq_devices(struct idxd_device *idxd)
{
	int i, rc, j;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = device_add(&wq->conf_dev);
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i;
	for (; i < idxd->max_wqs; i++)
		put_device(&idxd->wqs[i]->conf_dev);

	while (j--)
		device_unregister(&idxd->wqs[j]->conf_dev);
	return rc;
}

int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(&idxd->conf_dev);
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(&idxd->engines[i]->conf_dev);
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(&idxd->wqs[i]->conf_dev);
 err_wq:
	device_del(&idxd->conf_dev);
	return rc;
}

void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}
1758