xref: /openbmc/linux/drivers/block/aoe/aoechr.c (revision a04b41cd)
1 /* Copyright (c) 2012 Coraid, Inc.  See COPYING for GPL terms. */
2 /*
3  * aoechr.c
4  * AoE character device driver
5  */
6 
7 #include <linux/hdreg.h>
8 #include <linux/blkdev.h>
9 #include <linux/completion.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/mutex.h>
13 #include <linux/skbuff.h>
14 #include <linux/export.h>
15 #include "aoe.h"
16 
enum {
	//MINOR_STAT = 1, (moved to sysfs)
	MINOR_ERR = 2,		/* /dev/etherd/err: read queued error messages */
	MINOR_DISCOVER,		/* write: broadcast an AoE config query */
	MINOR_INTERFACES,	/* write: restrict AoE to the named interfaces */
	MINOR_REVALIDATE,	/* write "eX.Y": re-probe that aoe device */
	MINOR_FLUSH,		/* write: flush down aoe devices */
	MSGSZ = 2048,
	NMSG = 100,		/* message backlog to retain */
};
27 
/* Identity of one character device node created under /dev/etherd/. */
struct aoe_chardev {
	ulong minor;
	char name[32];
};

/* ErrMsg.flags bit: the slot holds a message not yet consumed by a reader */
enum { EMFL_VALID = 1 };

/* One slot of the emsgs ring buffer below. */
struct ErrMsg {
	short flags;
	short len;	/* bytes in msg (the copy is not NUL-terminated) */
	char *msg;	/* kmalloc'd copy; freed by the reader after copyout */
};
40 
static DEFINE_MUTEX(aoechr_mutex);	/* serializes aoechr_open */

/* A ring buffer of error messages, to be read through
 * "/dev/etherd/err".  When no messages are present,
 * readers will block waiting for messages to appear.
 */
static struct ErrMsg emsgs[NMSG];
static int emsgs_head_idx, emsgs_tail_idx;	/* consumer / producer slots */
static struct completion emsgs_comp;	/* signaled when a message is queued */
static spinlock_t emsgs_lock;		/* protects emsgs and both indices */
static int nblocked_emsgs_readers;	/* readers sleeping in aoechr_read */
static struct class *aoe_class;
static struct aoe_chardev chardevs[] = {
	{ MINOR_ERR, "err" },
	{ MINOR_DISCOVER, "discover" },
	{ MINOR_INTERFACES, "interfaces" },
	{ MINOR_REVALIDATE, "revalidate" },
	{ MINOR_FLUSH, "flush" },
};
60 
/* Kick off AoE device discovery.  Always succeeds from the
 * caller's point of view; the replies arrive asynchronously.
 */
static int
discover(void)
{
	aoecmd_cfg(0xffff, 0xff);	/* wildcard shelf and slot */
	return 0;
}
67 
68 static int
69 interfaces(const char __user *str, size_t size)
70 {
71 	if (set_aoe_iflist(str, size)) {
72 		printk(KERN_ERR
73 			"aoe: could not set interface list: too many interfaces\n");
74 		return -EINVAL;
75 	}
76 	return 0;
77 }
78 
79 static int
80 revalidate(const char __user *str, size_t size)
81 {
82 	int major, minor, n;
83 	ulong flags;
84 	struct aoedev *d;
85 	struct sk_buff *skb;
86 	char buf[16];
87 
88 	if (size >= sizeof buf)
89 		return -EINVAL;
90 	buf[sizeof buf - 1] = '\0';
91 	if (copy_from_user(buf, str, size))
92 		return -EFAULT;
93 
94 	n = sscanf(buf, "e%d.%d", &major, &minor);
95 	if (n != 2) {
96 		pr_err("aoe: invalid device specification %s\n", buf);
97 		return -EINVAL;
98 	}
99 	d = aoedev_by_aoeaddr(major, minor, 0);
100 	if (!d)
101 		return -EINVAL;
102 	spin_lock_irqsave(&d->lock, flags);
103 	aoecmd_cleanslate(d);
104 	aoecmd_cfg(major, minor);
105 loop:
106 	skb = aoecmd_ata_id(d);
107 	spin_unlock_irqrestore(&d->lock, flags);
108 	/* try again if we are able to sleep a bit,
109 	 * otherwise give up this revalidation
110 	 */
111 	if (!skb && !msleep_interruptible(250)) {
112 		spin_lock_irqsave(&d->lock, flags);
113 		goto loop;
114 	}
115 	aoedev_put(d);
116 	if (skb) {
117 		struct sk_buff_head queue;
118 		__skb_queue_head_init(&queue);
119 		__skb_queue_tail(&queue, skb);
120 		aoenet_xmit(&queue);
121 	}
122 	return 0;
123 }
124 
/* Queue a copy of msg onto the emsgs ring for readers of
 * /dev/etherd/err.  Uses GFP_ATOMIC and irqsave locking, so it is
 * callable from atomic context.  If the ring is full (next slot
 * still marked valid) or the copy cannot be allocated, the message
 * is dropped.  The caller keeps ownership of msg.
 */
void
aoechr_error(char *msg)
{
	struct ErrMsg *em;
	char *mp;
	ulong flags, n;

	n = strlen(msg);

	spin_lock_irqsave(&emsgs_lock, flags);

	em = emsgs + emsgs_tail_idx;
	if ((em->flags & EMFL_VALID)) {	/* ring full: drop this message */
bail:		spin_unlock_irqrestore(&emsgs_lock, flags);
		return;
	}

	mp = kmalloc(n, GFP_ATOMIC);
	if (mp == NULL) {
		printk(KERN_ERR "aoe: allocation failure, len=%ld\n", n);
		goto bail;
	}

	/* stored copy is not NUL-terminated; em->len carries the length */
	memcpy(mp, msg, n);
	em->msg = mp;
	em->flags |= EMFL_VALID;
	em->len = n;

	emsgs_tail_idx++;
	emsgs_tail_idx %= ARRAY_SIZE(emsgs);

	spin_unlock_irqrestore(&emsgs_lock, flags);

	/* wake a sleeping reader, if any */
	if (nblocked_emsgs_readers)
		complete(&emsgs_comp);
}
161 
162 static ssize_t
163 aoechr_write(struct file *filp, const char __user *buf, size_t cnt, loff_t *offp)
164 {
165 	int ret = -EINVAL;
166 
167 	switch ((unsigned long) filp->private_data) {
168 	default:
169 		printk(KERN_INFO "aoe: can't write to that file.\n");
170 		break;
171 	case MINOR_DISCOVER:
172 		ret = discover();
173 		break;
174 	case MINOR_INTERFACES:
175 		ret = interfaces(buf, cnt);
176 		break;
177 	case MINOR_REVALIDATE:
178 		ret = revalidate(buf, cnt);
179 		break;
180 	case MINOR_FLUSH:
181 		ret = aoedev_flush(buf, cnt);
182 		break;
183 	}
184 	if (ret == 0)
185 		ret = cnt;
186 	return ret;
187 }
188 
189 static int
190 aoechr_open(struct inode *inode, struct file *filp)
191 {
192 	int n, i;
193 
194 	mutex_lock(&aoechr_mutex);
195 	n = iminor(inode);
196 	filp->private_data = (void *) (unsigned long) n;
197 
198 	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
199 		if (chardevs[i].minor == n) {
200 			mutex_unlock(&aoechr_mutex);
201 			return 0;
202 		}
203 	mutex_unlock(&aoechr_mutex);
204 	return -EINVAL;
205 }
206 
/* Release hook: no per-open resources to tear down (private_data is
 * just the minor number), so this is a no-op.
 */
static int
aoechr_rel(struct inode *inode, struct file *filp)
{
	return 0;
}
212 
/* Read one queued error message from the emsgs ring (only valid on
 * the MINOR_ERR node).  Blocks until a message is available unless
 * O_NDELAY is set.  Returns the message length, -EAGAIN if the
 * caller's buffer is too small (or O_NDELAY and no message),
 * -ERESTARTSYS if interrupted, -EFAULT on a bad minor or copyout.
 */
static ssize_t
aoechr_read(struct file *filp, char __user *buf, size_t cnt, loff_t *off)
{
	unsigned long n;
	char *mp;
	struct ErrMsg *em;
	ssize_t len;
	ulong flags;

	n = (unsigned long) filp->private_data;
	if (n != MINOR_ERR)
		return -EFAULT;

	spin_lock_irqsave(&emsgs_lock, flags);

	/* sleep-and-retry until the head slot holds a valid message */
	for (;;) {
		em = emsgs + emsgs_head_idx;
		if ((em->flags & EMFL_VALID) != 0)
			break;
		if (filp->f_flags & O_NDELAY) {
			spin_unlock_irqrestore(&emsgs_lock, flags);
			return -EAGAIN;
		}
		/* advertise ourselves before dropping the lock so the
		 * producer knows to complete() */
		nblocked_emsgs_readers++;

		spin_unlock_irqrestore(&emsgs_lock, flags);

		n = wait_for_completion_interruptible(&emsgs_comp);

		spin_lock_irqsave(&emsgs_lock, flags);

		nblocked_emsgs_readers--;

		if (n) {	/* interrupted by a signal */
			spin_unlock_irqrestore(&emsgs_lock, flags);
			return -ERESTARTSYS;
		}
	}
	if (em->len > cnt) {
		spin_unlock_irqrestore(&emsgs_lock, flags);
		return -EAGAIN;
	}
	/* detach the message and free the slot while still locked */
	mp = em->msg;
	len = em->len;
	em->msg = NULL;
	em->flags &= ~EMFL_VALID;

	emsgs_head_idx++;
	emsgs_head_idx %= ARRAY_SIZE(emsgs);

	spin_unlock_irqrestore(&emsgs_lock, flags);

	/* copy to userspace outside the spinlock; we own mp now */
	n = copy_to_user(buf, mp, len);
	kfree(mp);
	return n == 0 ? len : -EFAULT;
}
269 
270 static const struct file_operations aoe_fops = {
271 	.write = aoechr_write,
272 	.read = aoechr_read,
273 	.open = aoechr_open,
274 	.release = aoechr_rel,
275 	.owner = THIS_MODULE,
276 	.llseek = noop_llseek,
277 };
278 
279 static char *aoe_devnode(struct device *dev, umode_t *mode)
280 {
281 	return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
282 }
283 
284 int __init
285 aoechr_init(void)
286 {
287 	int n, i;
288 
289 	n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops);
290 	if (n < 0) {
291 		printk(KERN_ERR "aoe: can't register char device\n");
292 		return n;
293 	}
294 	init_completion(&emsgs_comp);
295 	spin_lock_init(&emsgs_lock);
296 	aoe_class = class_create(THIS_MODULE, "aoe");
297 	if (IS_ERR(aoe_class)) {
298 		unregister_chrdev(AOE_MAJOR, "aoechr");
299 		return PTR_ERR(aoe_class);
300 	}
301 	aoe_class->devnode = aoe_devnode;
302 
303 	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
304 		device_create(aoe_class, NULL,
305 			      MKDEV(AOE_MAJOR, chardevs[i].minor), NULL,
306 			      chardevs[i].name);
307 
308 	return 0;
309 }
310 
311 void
312 aoechr_exit(void)
313 {
314 	int i;
315 
316 	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
317 		device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor));
318 	class_destroy(aoe_class);
319 	unregister_chrdev(AOE_MAJOR, "aoechr");
320 }
321 
322