/*
 * linux/net/9p/trans_xen.c
 *
 * Xen transport layer.
 *
 * Copyright (C) 2017 by Stefano Stabellini <stefano@aporeto.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/9pfs.h>

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

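/* Each 9pfs device uses XEN_9PFS_NUM_RINGS rings. A ring is backed by
 * 1 << XEN_9PFS_RING_ORDER grant pages, shared between the "in"
 * (responses) and "out" (requests) directions; XEN_9PFS_RING_SIZE is
 * the number of bytes available in one direction.
 */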
#define XEN_9PFS_NUM_RINGS 2
#define XEN_9PFS_RING_ORDER 6
#define XEN_9PFS_RING_SIZE  XEN_FLEX_RING_SIZE(XEN_9PFS_RING_ORDER)

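/* Layout of the start of every 9P message as it appears on the ring:
 * size, type id and tag. Used to peek at an incoming message before
 * copying the full packet out of the ring.
 */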
struct xen_9pfs_header {
	uint32_t size;
	uint8_t id;
	uint16_t tag;

	/* uint8_t sdata[]; */
} __attribute__((packed));

/* One per ring, more than one per 9pfs share */
struct xen_9pfs_dataring {
	struct xen_9pfs_front_priv *priv;

	struct xen_9pfs_data_intf *intf;
	grant_ref_t ref;
	int evtchn;
	int irq;
	/* protect a ring from concurrent accesses */
	spinlock_t lock;

	struct xen_9pfs_data data;
	wait_queue_head_t wq;
	struct work_struct work;
};

/* One per 9pfs share */
struct xen_9pfs_front_priv {
	struct list_head list;
	struct xenbus_device *dev;
	char *tag;
	struct p9_client *client;

	int num_rings;
	struct xen_9pfs_dataring *rings;
};

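/* All probed 9pfs devices. Lookups (create/close/request) take
 * xen_9pfs_lock for reading; probe and remove take it for writing.
 */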
static LIST_HEAD(xen_9pfs_devs);
static DEFINE_RWLOCK(xen_9pfs_lock);

/* We don't currently allow canceling of requests */
static int p9_xen_cancel(struct p9_client *client, struct p9_req_t *req)
{
	return 1;
}

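/* Transport "create" op: attach @client to the probed device whose
 * xenstore tag matches @addr.
 */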
static int p9_xen_create(struct p9_client *client, const char *addr, char *args)
{
	struct xen_9pfs_front_priv *priv;

	read_lock(&xen_9pfs_lock);
	list_for_each_entry(priv, &xen_9pfs_devs, list) {
		if (!strcmp(priv->tag, addr)) {
			priv->client = client;
			read_unlock(&xen_9pfs_lock);
			return 0;
		}
	}
	read_unlock(&xen_9pfs_lock);
	return -EINVAL;
}

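/* Transport "close" op: detach @client from the device it was attached to. */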
static void p9_xen_close(struct p9_client *client)
{
	struct xen_9pfs_front_priv *priv;

	read_lock(&xen_9pfs_lock);
	list_for_each_entry(priv, &xen_9pfs_devs, list) {
		if (priv->client == client) {
			priv->client = NULL;
			read_unlock(&xen_9pfs_lock);
			return;
		}
	}
	read_unlock(&xen_9pfs_lock);
}

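/* Return true if the out ring has at least @size bytes of free space. */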
static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
{
	RING_IDX cons, prod;

	cons = ring->intf->out_cons;
	prod = ring->intf->out_prod;
	virt_mb();

	return XEN_9PFS_RING_SIZE -
		xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) >= size;
}

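/* Transport "request" op: pick a ring based on the request tag, sleep
 * until the out ring has enough free space, copy the request in and
 * notify the backend via the ring's event channel.
 */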
static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
{
	struct xen_9pfs_front_priv *priv = NULL;
	RING_IDX cons, prod, masked_cons, masked_prod;
	unsigned long flags;
	u32 size = p9_req->tc->size;
	struct xen_9pfs_dataring *ring;
	int num;

	read_lock(&xen_9pfs_lock);
	list_for_each_entry(priv, &xen_9pfs_devs, list) {
		if (priv->client == client)
			break;
	}
	read_unlock(&xen_9pfs_lock);
	/* list_for_each_entry() never leaves priv NULL: if nothing matched,
	 * priv is the bogus entry computed from the list head, so test for
	 * that instead of dereferencing it.
	 */
	if (&priv->list == &xen_9pfs_devs)
		return -EINVAL;

	num = p9_req->tc->tag % priv->num_rings;
	ring = &priv->rings[num];

again:
	while (wait_event_interruptible(ring->wq,
					p9_xen_write_todo(ring, size)) != 0)
		;

	spin_lock_irqsave(&ring->lock, flags);
	cons = ring->intf->out_cons;
	prod = ring->intf->out_prod;
	virt_mb();

	if (XEN_9PFS_RING_SIZE - xen_9pfs_queued(prod, cons,
						 XEN_9PFS_RING_SIZE) < size) {
		spin_unlock_irqrestore(&ring->lock, flags);
		goto again;
	}

	masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
	masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);

	xen_9pfs_write_packet(ring->data.out, p9_req->tc->sdata, size,
			      &masked_prod, masked_cons, XEN_9PFS_RING_SIZE);

	p9_req->status = REQ_STATUS_SENT;
	virt_wmb();			/* write ring before updating pointer */
	prod += size;
	ring->intf->out_prod = prod;
	spin_unlock_irqrestore(&ring->lock, flags);
	notify_remote_via_irq(ring->irq);

	return 0;
}

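/* Work handler: drain complete responses from the in ring, match them to
 * pending requests by tag and hand them back to the 9p client.
 */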
static void p9_xen_response(struct work_struct *work)
{
	struct xen_9pfs_front_priv *priv;
	struct xen_9pfs_dataring *ring;
	RING_IDX cons, prod, masked_cons, masked_prod;
	struct xen_9pfs_header h;
	struct p9_req_t *req;
	int status;

	ring = container_of(work, struct xen_9pfs_dataring, work);
	priv = ring->priv;

	while (1) {
		cons = ring->intf->in_cons;
		prod = ring->intf->in_prod;
		virt_rmb();

		if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) <
		    sizeof(h)) {
			notify_remote_via_irq(ring->irq);
			return;
		}

		masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);

		/* First, read just the header */
		xen_9pfs_read_packet(&h, ring->data.in, sizeof(h),
				     masked_prod, &masked_cons,
				     XEN_9PFS_RING_SIZE);

		req = p9_tag_lookup(priv->client, h.tag);
		if (!req || req->status != REQ_STATUS_SENT) {
			dev_warn(&priv->dev->dev, "Wrong req tag=%x\n", h.tag);
			cons += h.size;
			virt_mb();
			ring->intf->in_cons = cons;
			continue;
		}

		memcpy(req->rc, &h, sizeof(h));
		req->rc->offset = 0;

		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
		/* Then, read the whole packet (including the header) */
		xen_9pfs_read_packet(req->rc->sdata, ring->data.in, h.size,
				     masked_prod, &masked_cons,
				     XEN_9PFS_RING_SIZE);

		virt_mb();
		cons += h.size;
		ring->intf->in_cons = cons;

		status = (req->status != REQ_STATUS_ERROR) ?
			REQ_STATUS_RCVD : REQ_STATUS_ERROR;

		p9_client_cb(priv->client, req, status);
	}
}

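/* Event channel handler: wake up writers waiting for out-ring space and
 * schedule the response worker to process the in ring.
 */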
static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
{
	struct xen_9pfs_dataring *ring = r;

	if (!ring || !ring->priv->client) {
		/* ignore spurious interrupt */
		return IRQ_HANDLED;
	}

	wake_up_interruptible(&ring->wq);
	schedule_work(&ring->work);

	return IRQ_HANDLED;
}

static struct p9_trans_module p9_xen_trans = {
	.name = "xen",
	.maxsize = 1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT),
	.def = 1,
	.create = p9_xen_create,
	.close = p9_xen_close,
	.request = p9_xen_request,
	.cancel = p9_xen_cancel,
	.owner = THIS_MODULE,
};

static const struct xenbus_device_id xen_9pfs_front_ids[] = {
	{ "9pfs" },
	{ "" }
};

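/* Release everything attached to @priv: remove it from the device list,
 * unbind the per-ring interrupts, end grant access to the data and
 * interface pages, and free the memory.
 */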
static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
{
	int i, j;

	write_lock(&xen_9pfs_lock);
	list_del(&priv->list);
	write_unlock(&xen_9pfs_lock);

	for (i = 0; i < priv->num_rings; i++) {
		if (!priv->rings[i].intf)
			break;
		if (priv->rings[i].irq > 0)
			unbind_from_irqhandler(priv->rings[i].irq, priv->dev);
		if (priv->rings[i].data.in) {
			for (j = 0; j < (1 << XEN_9PFS_RING_ORDER); j++) {
				grant_ref_t ref;

				ref = priv->rings[i].intf->ref[j];
				gnttab_end_foreign_access(ref, 0, 0);
			}
			free_pages((unsigned long)priv->rings[i].data.in,
				   XEN_9PFS_RING_ORDER -
				   (PAGE_SHIFT - XEN_PAGE_SHIFT));
		}
		gnttab_end_foreign_access(priv->rings[i].ref, 0, 0);
		free_page((unsigned long)priv->rings[i].intf);
	}
	kfree(priv->rings);
	kfree(priv->tag);
	kfree(priv);
}

static int xen_9pfs_front_remove(struct xenbus_device *dev)
{
	struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);

	dev_set_drvdata(&dev->dev, NULL);
	xen_9pfs_front_free(priv);
	return 0;
}

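/* Set up one ring: allocate and grant the shared interface page and the
 * data pages, and bind an event channel to the frontend interrupt handler.
 */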
static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
					 struct xen_9pfs_dataring *ring)
{
	int i = 0;
	int ret = -ENOMEM;
	void *bytes = NULL;

	init_waitqueue_head(&ring->wq);
	spin_lock_init(&ring->lock);
	INIT_WORK(&ring->work, p9_xen_response);

	ring->intf = (struct xen_9pfs_data_intf *)get_zeroed_page(GFP_KERNEL);
	if (!ring->intf)
		return ret;
	ret = gnttab_grant_foreign_access(dev->otherend_id,
					  virt_to_gfn(ring->intf), 0);
	if (ret < 0)
		goto out;
	ring->ref = ret;
	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			XEN_9PFS_RING_ORDER - (PAGE_SHIFT - XEN_PAGE_SHIFT));
	if (!bytes) {
		ret = -ENOMEM;
		goto out;
	}
	for (; i < (1 << XEN_9PFS_RING_ORDER); i++) {
		ret = gnttab_grant_foreign_access(
				dev->otherend_id, virt_to_gfn(bytes) + i, 0);
		if (ret < 0)
			goto out;
		ring->intf->ref[i] = ret;
	}
	ring->intf->ring_order = XEN_9PFS_RING_ORDER;
	ring->data.in = bytes;
	ring->data.out = bytes + XEN_9PFS_RING_SIZE;

	ret = xenbus_alloc_evtchn(dev, &ring->evtchn);
	if (ret)
		goto out;
	ring->irq = bind_evtchn_to_irqhandler(ring->evtchn,
					      xen_9pfs_front_event_handler,
					      0, "xen_9pfs-frontend", ring);
	if (ring->irq >= 0)
		return 0;

	xenbus_free_evtchn(dev, ring->evtchn);
	ret = ring->irq;
out:
	if (bytes) {
		for (i--; i >= 0; i--)
			gnttab_end_foreign_access(ring->intf->ref[i], 0, 0);
		free_pages((unsigned long)bytes,
			   XEN_9PFS_RING_ORDER -
			   (PAGE_SHIFT - XEN_PAGE_SHIFT));
	}
	gnttab_end_foreign_access(ring->ref, 0, 0);
	free_page((unsigned long)ring->intf);
	return ret;
}

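/* Probe: check the backend's advertised "versions", "max-rings" and
 * "max-ring-page-order", allocate the rings, publish ring references and
 * event channels in xenstore, and read the mount tag for this device.
 */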
static int xen_9pfs_front_probe(struct xenbus_device *dev,
				const struct xenbus_device_id *id)
{
	int ret, i;
	struct xenbus_transaction xbt;
	struct xen_9pfs_front_priv *priv = NULL;
	char *versions;
	unsigned int max_rings, max_ring_order, len = 0;

	versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
	if (!len)
		return -EINVAL;
	if (strcmp(versions, "1")) {
		kfree(versions);
		return -EINVAL;
	}
	kfree(versions);
	max_rings = xenbus_read_unsigned(dev->otherend, "max-rings", 0);
	if (max_rings < XEN_9PFS_NUM_RINGS)
		return -EINVAL;
	max_ring_order = xenbus_read_unsigned(dev->otherend,
					      "max-ring-page-order", 0);
	if (max_ring_order < XEN_9PFS_RING_ORDER)
		return -EINVAL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Initialize the list head so that xen_9pfs_front_free() can safely
	 * list_del() it on error paths taken before the device is added to
	 * xen_9pfs_devs.
	 */
	INIT_LIST_HEAD(&priv->list);
	priv->dev = dev;
	priv->num_rings = XEN_9PFS_NUM_RINGS;
	priv->rings = kcalloc(priv->num_rings, sizeof(*priv->rings),
			      GFP_KERNEL);
	if (!priv->rings) {
		kfree(priv);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rings; i++) {
		priv->rings[i].priv = priv;
		ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i]);
		if (ret < 0)
			goto error;
	}

 again:
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		goto error;
	}
	ret = xenbus_printf(xbt, dev->nodename, "version", "%u", 1);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "num-rings", "%u",
			    priv->num_rings);
	if (ret)
		goto error_xenbus;
	for (i = 0; i < priv->num_rings; i++) {
		char str[16];

		BUILD_BUG_ON(XEN_9PFS_NUM_RINGS > 9);
		sprintf(str, "ring-ref%u", i);
		ret = xenbus_printf(xbt, dev->nodename, str, "%d",
				    priv->rings[i].ref);
		if (ret)
			goto error_xenbus;

		sprintf(str, "event-channel-%u", i);
		ret = xenbus_printf(xbt, dev->nodename, str, "%u",
				    priv->rings[i].evtchn);
		if (ret)
			goto error_xenbus;
	}
	priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL);
	if (IS_ERR(priv->tag)) {
		ret = PTR_ERR(priv->tag);
		/* don't let xen_9pfs_front_free() kfree() an ERR_PTR */
		priv->tag = NULL;
		goto error_xenbus;
	}
	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, ret, "completing transaction");
		goto error;
	}

	write_lock(&xen_9pfs_lock);
	list_add_tail(&priv->list, &xen_9pfs_devs);
	write_unlock(&xen_9pfs_lock);
	dev_set_drvdata(&dev->dev, priv);
	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 error_xenbus:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, ret, "writing xenstore");
 error:
	dev_set_drvdata(&dev->dev, NULL);
	xen_9pfs_front_free(priv);
	return ret;
}

static int xen_9pfs_front_resume(struct xenbus_device *dev)
{
	dev_warn(&dev->dev, "suspend/resume unsupported\n");
	return 0;
}

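/* Follow the backend's xenbus state: switch to Connected when the backend
 * connects and close the frontend when the backend closes.
 */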
static void xen_9pfs_front_changed(struct xenbus_device *dev,
				   enum xenbus_state backend_state)
{
	switch (backend_state) {
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
		break;

	case XenbusStateInitWait:
		break;

	case XenbusStateConnected:
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		/* Missed the backend's CLOSING state -- fallthrough */
	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}

static struct xenbus_driver xen_9pfs_front_driver = {
	.ids = xen_9pfs_front_ids,
	.probe = xen_9pfs_front_probe,
	.remove = xen_9pfs_front_remove,
	.resume = xen_9pfs_front_resume,
	.otherend_changed = xen_9pfs_front_changed,
};

static int p9_trans_xen_init(void)
{
	int rc;

	if (!xen_domain())
		return -ENODEV;

	pr_info("Initialising Xen transport for 9pfs\n");

	v9fs_register_trans(&p9_xen_trans);
	rc = xenbus_register_frontend(&xen_9pfs_front_driver);
	if (rc < 0)
		/* don't leave a dangling transport registered */
		v9fs_unregister_trans(&p9_xen_trans);

	return rc;
}
module_init(p9_trans_xen_init);

static void p9_trans_xen_exit(void)
{
	v9fs_unregister_trans(&p9_xen_trans);
	xenbus_unregister_driver(&xen_9pfs_front_driver);
}
module_exit(p9_trans_xen_exit);