// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

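/*
 * cap: read-only attribute reporting the operation types this channel
 * supports, derived from the dma_device capability mask.  The output is
 * "copy" followed by any of " pq", " pq_val", " xor", " xor_val" and
 * " intr" that are set.  Reading it from userspace might look like the
 * following (path and output are illustrative only):
 *
 *   $ cat /sys/class/dma/dma0chan0/quickdata/cap
 *   copy pq pq_val xor xor_val intr
 */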
static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

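/*
 * version: read-only attribute reporting the I/OAT hardware version.  The
 * 8-bit version value packs the major revision in the upper nibble and the
 * minor revision in the lower nibble, so e.g. 0x33 is printed as "3.3".
 */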
static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *ioat_dma = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       ioat_dma->version >> 4, ioat_dma->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

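/*
 * Generic show/store dispatch for the per-channel "quickdata" kobject.
 * container_of() recovers the ioat_sysfs_entry from the raw attribute and
 * the ioatdma_chan from the kobject, then the request is forwarded to the
 * entry's handler.  An entry without a matching handler returns -EIO, in
 * line with the usual sysfs convention.
 */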
static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioatdma_chan *ioat_chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(&ioat_chan->dma_chan, page);
}

static ssize_t
ioat_attr_store(struct kobject *kobj, struct attribute *attr,
		const char *page, size_t count)
{
	struct ioat_sysfs_entry *entry;
	struct ioatdma_chan *ioat_chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);

	if (!entry->store)
		return -EIO;
	return entry->store(&ioat_chan->dma_chan, page, count);
}

const struct sysfs_ops ioat_sysfs_ops = {
	.show	= ioat_attr_show,
	.store  = ioat_attr_store,
};

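/*
 * Register a "quickdata" kobject beneath every channel's device so the
 * attributes above appear as e.g. .../dma0chan0/quickdata/ring_size (the
 * exact path depends on how the channel device is registered).  A failed
 * registration only triggers a warning: the reference is dropped and the
 * channel is flagged with IOAT_KOBJ_INIT_FAIL so teardown knows there is
 * nothing to delete.
 */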
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type)
{
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&ioat_chan->kobj, type,
					   parent, "quickdata");
		if (err) {
			dev_warn(to_dev(ioat_chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&ioat_chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state);
		}
	}
}

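/*
 * Tear down the per-channel kobjects, skipping any channel whose sysfs
 * registration failed (and whose reference was therefore already dropped)
 * in ioat_kobject_add().
 */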
void ioat_kobject_del(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state)) {
			kobject_del(&ioat_chan->kobj);
			kobject_put(&ioat_chan->kobj);
		}
	}
}

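/*
 * ring_size: read-only attribute reporting the number of descriptors in
 * the channel's ring, i.e. 1 << alloc_order.  The "& ~1" clears bit 0 so
 * an order of zero (e.g. before a ring has been allocated) reads back as
 * 0 rather than 1.
 */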
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

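/*
 * ring_active: read-only attribute reporting how many ring descriptors are
 * currently outstanding (issued but not yet cleaned up).  The count is
 * sampled without taking the ring lock, so it is only an approximation.
 */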
static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	/* ...taken outside the lock, no need to be precise */
	return sprintf(page, "%d\n", ioat_ring_active(ioat_chan));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

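/*
 * intr_coalesce: read-write attribute controlling the channel's interrupt
 * coalescing value.  Writes outside 0..IOAT_INTRDELAY_MASK are rejected
 * with -EINVAL.  Note that the sscanf() result is only compared against -1
 * rather than checked for a successful conversion, so input that does not
 * parse can silently set the value to 0.  Setting it from userspace might
 * look like the following (path is illustrative only):
 *
 *   $ echo 16 > /sys/class/dma/dma0chan0/quickdata/intr_coalesce
 */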
static ssize_t intr_coalesce_show(struct dma_chan *c, char *page)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat_chan->intr_coalesce);
}

static ssize_t intr_coalesce_store(struct dma_chan *c, const char *page,
				   size_t count)
{
	int intr_coalesce = 0;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	if (sscanf(page, "%du", &intr_coalesce) != -1) {
		if ((intr_coalesce < 0) ||
		    (intr_coalesce > IOAT_INTRDELAY_MASK))
			return -EINVAL;
		ioat_chan->intr_coalesce = intr_coalesce;
	}

	return count;
}

static struct ioat_sysfs_entry intr_coalesce_attr = __ATTR_RW(intr_coalesce);

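/*
 * Default attributes attached to every channel's "quickdata" kobject.
 * ATTRIBUTE_GROUPS() turns ioat_attrs into the ioat_groups array that
 * ioat_ktype points at below.
 */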
static struct attribute *ioat_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	&intr_coalesce_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ioat);

struct kobj_type ioat_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_groups = ioat_groups,
};
167