xref: /openbmc/linux/drivers/block/aoe/aoechr.c (revision 896831f5)
/* Copyright (c) 2007 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoechr.c
 * AoE character device driver
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include "aoe.h"

enum {
	//MINOR_STAT = 1, (moved to sysfs)
	MINOR_ERR = 2,
	MINOR_DISCOVER,
	MINOR_INTERFACES,
	MINOR_REVALIDATE,
	MINOR_FLUSH,
	MSGSZ = 2048,
	NMSG = 100,		/* message backlog to retain */
};

struct aoe_chardev {
	ulong minor;
	char name[32];
};

enum { EMFL_VALID = 1 };

struct ErrMsg {
	short flags;
	short len;
	char *msg;
};

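/*
 * aoechr_error() queues driver error messages in the emsgs ring below;
 * readers of the "err" character device drain it.  emsgs_head_idx and
 * emsgs_tail_idx track the ring positions, emsgs_comp wakes a reader
 * blocked waiting for a message, and emsgs_lock protects all of this
 * state.
 */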
static DEFINE_MUTEX(aoechr_mutex);
static struct ErrMsg emsgs[NMSG];
static int emsgs_head_idx, emsgs_tail_idx;
static struct completion emsgs_comp;
static spinlock_t emsgs_lock;
static int nblocked_emsgs_readers;
static struct class *aoe_class;
static struct aoe_chardev chardevs[] = {
	{ MINOR_ERR, "err" },
	{ MINOR_DISCOVER, "discover" },
	{ MINOR_INTERFACES, "interfaces" },
	{ MINOR_REVALIDATE, "revalidate" },
	{ MINOR_FLUSH, "flush" },
};

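/*
 * Send an AoE config query addressed to all shelves and slots (0xffff
 * and 0xff are the protocol's broadcast values) so that every target
 * announces itself; triggered by a write to the "discover" device.
 */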
static int
discover(void)
{
	aoecmd_cfg(0xffff, 0xff);
	return 0;
}

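/*
 * Hand the user-supplied list of allowed network interfaces to
 * set_aoe_iflist(); invoked by writes to the "interfaces" device.
 */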
static int
interfaces(const char __user *str, size_t size)
{
	if (set_aoe_iflist(str, size)) {
		printk(KERN_ERR
			"aoe: could not set interface list: too many interfaces\n");
		return -EINVAL;
	}
	return 0;
}

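/*
 * Force rediscovery of one device.  The user writes a specification of
 * the form "eMAJOR.MINOR" to the "revalidate" device; the driver resets
 * its state for that device, resends an ATA identify command, and
 * re-queries the target's configuration.
 */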
static int
revalidate(const char __user *str, size_t size)
{
	int major, minor, n;
	ulong flags;
	struct aoedev *d;
	struct sk_buff *skb;
	char buf[16];

	if (size >= sizeof buf)
		return -EINVAL;
	if (copy_from_user(buf, str, size))
		return -EFAULT;
	buf[size] = '\0';	/* terminate right after the user data */

	n = sscanf(buf, "e%d.%d", &major, &minor);
	if (n != 2) {
		pr_err("aoe: invalid device specification %s\n", buf);
		return -EINVAL;
	}
	d = aoedev_by_aoeaddr(major, minor);
	if (!d)
		return -EINVAL;
	spin_lock_irqsave(&d->lock, flags);
	aoecmd_cleanslate(d);
loop:
	skb = aoecmd_ata_id(d);
	spin_unlock_irqrestore(&d->lock, flags);
	/* try again if we are able to sleep a bit,
	 * otherwise give up this revalidation
	 */
	if (!skb && !msleep_interruptible(200)) {
		spin_lock_irqsave(&d->lock, flags);
		goto loop;
	}
	if (skb) {
		struct sk_buff_head queue;
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		aoenet_xmit(&queue);
	}
	aoecmd_cfg(major, minor);
	return 0;
}

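/*
 * Queue an error message for readers of the "err" character device.
 * The message is copied into the emsgs ring under emsgs_lock; if the
 * ring is full the message is silently dropped.  A blocked reader, if
 * any, is woken via emsgs_comp.
 */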
void
aoechr_error(char *msg)
{
	struct ErrMsg *em;
	char *mp;
	ulong flags, n;

	n = strlen(msg);

	spin_lock_irqsave(&emsgs_lock, flags);

	em = emsgs + emsgs_tail_idx;
	if ((em->flags & EMFL_VALID)) {
bail:		spin_unlock_irqrestore(&emsgs_lock, flags);
		return;
	}

	mp = kmalloc(n, GFP_ATOMIC);
	if (mp == NULL) {
		printk(KERN_ERR "aoe: allocation failure, len=%ld\n", n);
		goto bail;
	}

	memcpy(mp, msg, n);
	em->msg = mp;
	em->flags |= EMFL_VALID;
	em->len = n;

	emsgs_tail_idx++;
	emsgs_tail_idx %= ARRAY_SIZE(emsgs);

	spin_unlock_irqrestore(&emsgs_lock, flags);

	if (nblocked_emsgs_readers)
		complete(&emsgs_comp);
}

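/*
 * Writes to the character devices drive the driver's control actions;
 * the minor number stashed in filp->private_data at open time selects
 * which action to run.  On success the full write count is returned.
 */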
static ssize_t
aoechr_write(struct file *filp, const char __user *buf, size_t cnt, loff_t *offp)
{
	int ret = -EINVAL;

	switch ((unsigned long) filp->private_data) {
	default:
		printk(KERN_INFO "aoe: can't write to that file.\n");
		break;
	case MINOR_DISCOVER:
		ret = discover();
		break;
	case MINOR_INTERFACES:
		ret = interfaces(buf, cnt);
		break;
	case MINOR_REVALIDATE:
		ret = revalidate(buf, cnt);
		break;
	case MINOR_FLUSH:
		ret = aoedev_flush(buf, cnt);
	}
	if (ret == 0)
		ret = cnt;
	return ret;
}

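/*
 * Record the minor number in filp->private_data and accept the open
 * only if it matches one of the minors in chardevs[].
 */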
static int
aoechr_open(struct inode *inode, struct file *filp)
{
	int n, i;

	mutex_lock(&aoechr_mutex);
	n = iminor(inode);
	filp->private_data = (void *) (unsigned long) n;

	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
		if (chardevs[i].minor == n) {
			mutex_unlock(&aoechr_mutex);
			return 0;
		}
	mutex_unlock(&aoechr_mutex);
	return -EINVAL;
}

static int
aoechr_rel(struct inode *inode, struct file *filp)
{
	return 0;
}

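/*
 * Deliver the oldest queued error message to a reader of the "err"
 * device.  Without O_NDELAY the reader blocks on emsgs_comp until a
 * message is queued; with O_NDELAY an empty ring returns -EAGAIN, as
 * does a buffer too small to hold the whole message.
 */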
static ssize_t
aoechr_read(struct file *filp, char __user *buf, size_t cnt, loff_t *off)
{
	unsigned long n;
	char *mp;
	struct ErrMsg *em;
	ssize_t len;
	ulong flags;

	n = (unsigned long) filp->private_data;
	if (n != MINOR_ERR)
		return -EFAULT;

	spin_lock_irqsave(&emsgs_lock, flags);

	for (;;) {
		em = emsgs + emsgs_head_idx;
		if ((em->flags & EMFL_VALID) != 0)
			break;
		if (filp->f_flags & O_NDELAY) {
			spin_unlock_irqrestore(&emsgs_lock, flags);
			return -EAGAIN;
		}
		nblocked_emsgs_readers++;

		spin_unlock_irqrestore(&emsgs_lock, flags);

		n = wait_for_completion_interruptible(&emsgs_comp);

		spin_lock_irqsave(&emsgs_lock, flags);

		nblocked_emsgs_readers--;

		if (n) {
			spin_unlock_irqrestore(&emsgs_lock, flags);
			return -ERESTARTSYS;
		}
	}
	if (em->len > cnt) {
		spin_unlock_irqrestore(&emsgs_lock, flags);
		return -EAGAIN;
	}
	mp = em->msg;
	len = em->len;
	em->msg = NULL;
	em->flags &= ~EMFL_VALID;

	emsgs_head_idx++;
	emsgs_head_idx %= ARRAY_SIZE(emsgs);

	spin_unlock_irqrestore(&emsgs_lock, flags);

	n = copy_to_user(buf, mp, len);
	kfree(mp);
	return n == 0 ? len : -EFAULT;
}

static const struct file_operations aoe_fops = {
	.write = aoechr_write,
	.read = aoechr_read,
	.open = aoechr_open,
	.release = aoechr_rel,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

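/*
 * Prefix the node names with "etherd/" so the devices show up as
 * etherd/err, etherd/discover, etc. (under /dev on systems using
 * devtmpfs or udev).
 */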
static char *aoe_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
}

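/*
 * Register the AoE character device major, set up the error-message
 * ring's completion and lock, and create a device node for each entry
 * in chardevs[].
 */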
int __init
aoechr_init(void)
{
	int n, i;

	n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops);
	if (n < 0) {
		printk(KERN_ERR "aoe: can't register char device\n");
		return n;
	}
	init_completion(&emsgs_comp);
	spin_lock_init(&emsgs_lock);
	aoe_class = class_create(THIS_MODULE, "aoe");
	if (IS_ERR(aoe_class)) {
		unregister_chrdev(AOE_MAJOR, "aoechr");
		return PTR_ERR(aoe_class);
	}
	aoe_class->devnode = aoe_devnode;

	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
		device_create(aoe_class, NULL,
			      MKDEV(AOE_MAJOR, chardevs[i].minor), NULL,
			      chardevs[i].name);

	return 0;
}

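/*
 * Tear down everything aoechr_init() created: the device nodes, the
 * class, and the character device registration.
 */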
void
aoechr_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
		device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor));
	class_destroy(aoe_class);
	unregister_chrdev(AOE_MAJOR, "aoechr");
}