// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	if (engine->group)
		return sysfs_emit(buf, "%d\n", engine->group->id);
	else
		return sysfs_emit(buf, "%d\n", -1);
}

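/*
 * Writing a group id attaches the engine to that group; writing -1
 * detaches it. The engine counts of the old and new groups are kept
 * in sync.
 */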
static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine = confdev_to_engine(dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	kfree(engine);
}

struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};

/* Group attributes */

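/*
 * Recompute the number of free read buffers (formerly "tokens") by
 * subtracting every group's reservation from the device-wide maximum.
 * Called whenever a group's rdbufs_reserved value changes.
 */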
static void idxd_set_free_rdbufs(struct idxd_device *idxd)
{
	int i, rdbufs;

	for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		rdbufs += g->rdbufs_reserved;
	}

	idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs;
}

static ssize_t group_read_buffers_reserved_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_reserved);
}

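/*
 * "Tokens" were renamed "read buffers" in later revisions of the spec.
 * The tokens_* attributes remain as deprecated aliases: each one warns
 * once and forwards to its read_buffers_* counterpart.
 */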
static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
	return group_read_buffers_reserved_show(dev, attr, buf);
}

static ssize_t group_read_buffers_reserved_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_rdbufs)
		return -EINVAL;

	if (val > idxd->nr_rdbufs + group->rdbufs_reserved)
		return -EINVAL;

	group->rdbufs_reserved = val;
	idxd_set_free_rdbufs(idxd);
	return count;
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
	return group_read_buffers_reserved_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static struct device_attribute dev_attr_group_read_buffers_reserved =
		__ATTR(read_buffers_reserved, 0644, group_read_buffers_reserved_show,
		       group_read_buffers_reserved_store);

static ssize_t group_read_buffers_allowed_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_allowed);
}

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
	return group_read_buffers_allowed_show(dev, attr, buf);
}

static ssize_t group_read_buffers_allowed_store(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->rdbufs_reserved + idxd->nr_rdbufs)
		return -EINVAL;

	group->rdbufs_allowed = val;
	return count;
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
	return group_read_buffers_allowed_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static struct device_attribute dev_attr_group_read_buffers_allowed =
		__ATTR(read_buffers_allowed, 0644, group_read_buffers_allowed_show,
		       group_read_buffers_allowed_store);

static ssize_t group_use_read_buffer_limit_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit);
}

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
	return group_use_read_buffer_limit_show(dev, attr, buf);
}

static ssize_t group_use_read_buffer_limit_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->rdbuf_limit == 0)
		return -EPERM;

	group->use_rdbuf_limit = !!val;
	return count;
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
	return group_use_read_buffer_limit_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static struct device_attribute dev_attr_group_use_read_buffer_limit =
		__ATTR(use_read_buffer_limit, 0644, group_use_read_buffer_limit_show,
		       group_use_read_buffer_limit_store);

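/*
 * List the engines attached to this group as "engine<device>.<engine>"
 * tokens. The rc--/sysfs_emit_at() pair at the end overwrites the
 * trailing space with a newline.
 */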
static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_use_read_buffer_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_read_buffers_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_read_buffers_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = confdev_to_group(dev);

	kfree(group);
}

struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	}

	return sysfs_emit(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->group)
		return sysfs_emit(buf, "%u\n", wq->group->id);
	else
		return sysfs_emit(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}

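/* Switching a WQ to dedicated mode also clears its shared-mode threshold. */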
static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared")) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->size);
}

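/* Sum of the sizes currently claimed by all work queues on the device. */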
static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!idxd->hw.gen_cap.block_on_fault)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->threshold);
}

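/*
 * The threshold limits how many WQ entries shared-mode submitters may
 * occupy. It is meaningless for a dedicated WQ, which rejects it.
 */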
static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val == 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	char *input, *pos;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	input = kstrndup(buf, count, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	sprintf(wq->name, "%s", pos);
	kfree(input);
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

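/*
 * Parse a u64 from a sysfs write. Zero is rejected, and the value is
 * rounded up to the next power of two, since the hardware expresses
 * these limits as powers of two.
 */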
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->ats_dis);
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	wq->ats_dis = ats_dis;

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

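/*
 * Report the number of descriptors currently occupying the WQ, read
 * live from the WQCFG occupancy field.
 */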
static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u32 occup, offset;

	if (!idxd->hw.wq_cap.occupancy)
		return -EOPNOTSUPP;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
	occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

	return sysfs_emit(buf, "%u\n", occup);
}

static struct device_attribute dev_attr_wq_occupancy =
		__ATTR(occupancy, 0444, wq_occupancy_show, NULL);

static ssize_t wq_enqcmds_retries_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%u\n", wq->enqcmds_retries);
}

static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int rc;
	unsigned int retries;

	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	rc = kstrtouint(buf, 10, &retries);
	if (rc < 0)
		return rc;

	if (retries > IDXD_ENQCMDS_MAX_RETRIES)
		retries = IDXD_ENQCMDS_MAX_RETRIES;

	wq->enqcmds_retries = retries;
	return count;
}

static struct device_attribute dev_attr_wq_enqcmds_retries =
		__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	&dev_attr_wq_occupancy.attr,
	&dev_attr_wq_enqcmds_retries.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	kfree(wq->wqcfg);
	kfree(wq);
}

struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};

/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

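/*
 * The operation capability register is 256 bits wide, reported here as
 * four 64-bit words.
 */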
static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, rc = 0;

	for (i = 0; i < 4; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");
	return rc;
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int count = 0, i;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock(&idxd->dev_lock);

	return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sysfs_emit(buf, "halted\n");
	}

	return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, out = 0;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < 4; i++)
		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock(&idxd->dev_lock);
	out--;
	out += sysfs_emit_at(buf, out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_read_buffers_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_rdbufs);
}

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see max_read_buffers.\n");
	return max_read_buffers_show(dev, attr, buf);
}

static DEVICE_ATTR_RO(max_tokens);	/* deprecated */
static DEVICE_ATTR_RO(max_read_buffers);

static ssize_t read_buffer_limit_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit);
}

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
	return read_buffer_limit_show(dev, attr, buf);
}

static ssize_t read_buffer_limit_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.rdbuf_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_rdbufs)
		return -EINVAL;

	idxd->rdbuf_limit = val;
	return count;
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
	return read_buffer_limit_store(dev, attr, buf, count);
}

static DEVICE_ATTR_RW(token_limit);	/* deprecated */
static DEVICE_ATTR_RW(read_buffer_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}

static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	idxd->cmd_status = 0;
	return count;
}
static DEVICE_ATTR_RW(cmd_status);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_max_read_buffers.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_read_buffer_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	kfree(idxd->groups);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	ida_free(&idxd_ida, idxd->id);
	kfree(idxd);
}

struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

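/*
 * Register all engine conf devices. On failure, devices that were never
 * added only need their initialization reference dropped (put_device()),
 * while devices that were already added must be fully unregistered.
 * The group and WQ registration helpers below follow the same pattern.
 */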
static int idxd_register_engine_devices(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i, j, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		rc = device_add(engine_confdev(engine));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		put_device(engine_confdev(engine));
	}

	while (j--) {
		engine = idxd->engines[j];
		device_unregister(engine_confdev(engine));
	}
	return rc;
}

static int idxd_register_group_devices(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i, j, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		rc = device_add(group_confdev(group));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}

	while (j--) {
		group = idxd->groups[j];
		device_unregister(group_confdev(group));
	}
	return rc;
}

static int idxd_register_wq_devices(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i, rc, j;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		rc = device_add(wq_confdev(wq));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		put_device(wq_confdev(wq));
	}

	while (j--) {
		wq = idxd->wqs[j];
		device_unregister(wq_confdev(wq));
	}
	return rc;
}

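/*
 * Register the device itself, then its WQs, engines, and groups. Each
 * error label unwinds everything registered by the earlier steps.
 */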
int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(idxd_confdev(idxd));
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(engine_confdev(idxd->engines[i]));
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(wq_confdev(idxd->wqs[i]));
 err_wq:
	device_del(idxd_confdev(idxd));
	return rc;
}

void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(wq_confdev(wq));
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(engine_confdev(engine));
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(group_confdev(group));
	}
}

int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}