11f327613SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2b530cc79SEric Van Hensbergen /*
3fea511a6SEric Van Hensbergen  * The Virtio 9p transport driver
4b530cc79SEric Van Hensbergen  *
5e2735b77SEric Van Hensbergen  * This is a block based transport driver based on the lguest block driver
6e2735b77SEric Van Hensbergen  * code.
7b530cc79SEric Van Hensbergen  *
8fea511a6SEric Van Hensbergen  *  Copyright (C) 2007, 2008 Eric Van Hensbergen, IBM Corporation
9b530cc79SEric Van Hensbergen  *
10b530cc79SEric Van Hensbergen  *  Based on virtio console driver
11b530cc79SEric Van Hensbergen  *  Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation
12b530cc79SEric Van Hensbergen  */
13b530cc79SEric Van Hensbergen 
145d385153SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
155d385153SJoe Perches 
16b530cc79SEric Van Hensbergen #include <linux/in.h>
17b530cc79SEric Van Hensbergen #include <linux/module.h>
18b530cc79SEric Van Hensbergen #include <linux/net.h>
19b530cc79SEric Van Hensbergen #include <linux/ipv6.h>
20b530cc79SEric Van Hensbergen #include <linux/errno.h>
21b530cc79SEric Van Hensbergen #include <linux/kernel.h>
22b530cc79SEric Van Hensbergen #include <linux/un.h>
23b530cc79SEric Van Hensbergen #include <linux/uaccess.h>
24b530cc79SEric Van Hensbergen #include <linux/inet.h>
25b530cc79SEric Van Hensbergen #include <linux/file.h>
26b9cdc88dSWill Deacon #include <linux/highmem.h>
275a0e3ad6STejun Heo #include <linux/slab.h>
28b530cc79SEric Van Hensbergen #include <net/9p/9p.h>
29b530cc79SEric Van Hensbergen #include <linux/parser.h>
308b81ef58SEric Van Hensbergen #include <net/9p/client.h>
31b530cc79SEric Van Hensbergen #include <net/9p/transport.h>
32b530cc79SEric Van Hensbergen #include <linux/scatterlist.h>
3368da9ba4SVenkateswararao Jujjuri (JV) #include <linux/swap.h>
34b530cc79SEric Van Hensbergen #include <linux/virtio.h>
35b530cc79SEric Van Hensbergen #include <linux/virtio_9p.h>
364038866dSVenkateswararao Jujjuri (JV) #include "trans_common.h"
37b530cc79SEric Van Hensbergen 
38e2735b77SEric Van Hensbergen #define VIRTQUEUE_NUM	128
39e2735b77SEric Van Hensbergen 
40b530cc79SEric Van Hensbergen /* a single mutex to manage channel initialization and attachment */
41c1549497SJosef 'Jeff' Sipek static DEFINE_MUTEX(virtio_9p_lock);
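/* Global accounting for pages pinned by in-flight zero-copy requests:
 * vp_pinned counts them, and vp_wq is where zero-copy callers sleep when
 * the per-channel p9_max_pages ceiling has been reached.
 */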
4268da9ba4SVenkateswararao Jujjuri (JV) static DECLARE_WAIT_QUEUE_HEAD(vp_wq);
4368da9ba4SVenkateswararao Jujjuri (JV) static atomic_t vp_pinned = ATOMIC_INIT(0);
44b530cc79SEric Van Hensbergen 
45ee443996SEric Van Hensbergen /**
46ee443996SEric Van Hensbergen  * struct virtio_chan - per-instance transport information
47ee443996SEric Van Hensbergen  * @inuse: whether the channel is in use
48ee443996SEric Van Hensbergen  * @lock: protects multiple elements within this structure
490e15597eSAbhishek Kulkarni  * @client: client instance
50ee443996SEric Van Hensbergen  * @vdev: virtio dev associated with this channel
51ee443996SEric Van Hensbergen  * @vq: virtio queue associated with this channel
52760b3d61SAndrew Lunn  * @ring_bufs_avail: flag to indicate there is space available in the ring buffer
53760b3d61SAndrew Lunn  * @vc_wq: wait queue for waiting for ring buffer space to become available
54760b3d61SAndrew Lunn  * @p9_max_pages: maximum number of pinned pages
55ee443996SEric Van Hensbergen  * @sg: scatter gather list which is used to pack a request (protected?)
56760b3d61SAndrew Lunn  * @chan_list: linked list of channels
57ee443996SEric Van Hensbergen  *
58ee443996SEric Van Hensbergen  * We keep all per-channel information in a structure.
59b530cc79SEric Van Hensbergen  * This structure is allocated within the device's dev->mem space.
60b530cc79SEric Van Hensbergen  * A pointer to the structure is stored in the transport's private data.
61ee443996SEric Van Hensbergen  *
62b530cc79SEric Van Hensbergen  */
63ee443996SEric Van Hensbergen 
6437c1209dSAneesh Kumar K.V struct virtio_chan {
65ee443996SEric Van Hensbergen 	bool inuse;
66b530cc79SEric Van Hensbergen 
67e2735b77SEric Van Hensbergen 	spinlock_t lock;
68e2735b77SEric Van Hensbergen 
69fea511a6SEric Van Hensbergen 	struct p9_client *client;
70b530cc79SEric Van Hensbergen 	struct virtio_device *vdev;
71e2735b77SEric Van Hensbergen 	struct virtqueue *vq;
7252f44e0dSVenkateswararao Jujjuri (JV) 	int ring_bufs_avail;
7352f44e0dSVenkateswararao Jujjuri (JV) 	wait_queue_head_t *vc_wq;
7468da9ba4SVenkateswararao Jujjuri (JV) 	/* This is a global limit. Since we don't have a global structure,
7568da9ba4SVenkateswararao Jujjuri (JV) 	 * we place it in each channel.
7668da9ba4SVenkateswararao Jujjuri (JV) 	 */
777293bfbaSZhang Yanfei 	unsigned long p9_max_pages;
78e2735b77SEric Van Hensbergen 	/* Scatterlist: can be too big for stack. */
79e2735b77SEric Van Hensbergen 	struct scatterlist sg[VIRTQUEUE_NUM];
80760b3d61SAndrew Lunn 	/**
81760b3d61SAndrew Lunn 	 * @tag: name to identify a mount, null terminated
8297ee9b02SAneesh Kumar K.V 	 */
8397ee9b02SAneesh Kumar K.V 	char *tag;
8497ee9b02SAneesh Kumar K.V 
8537c1209dSAneesh Kumar K.V 	struct list_head chan_list;
8637c1209dSAneesh Kumar K.V };
8737c1209dSAneesh Kumar K.V 
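/* All channels discovered by p9_virtio_probe(), protected by virtio_9p_lock. */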
8837c1209dSAneesh Kumar K.V static struct list_head virtio_chan_list;
89b530cc79SEric Van Hensbergen 
90b530cc79SEric Van Hensbergen /* How many bytes left in this page. */
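/* e.g. with 4 KiB pages, an address 0x100 bytes into a page leaves 0xf00 (3840) bytes. */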
91b530cc79SEric Van Hensbergen static unsigned int rest_of_page(void *data)
92b530cc79SEric Van Hensbergen {
93222e4adeSAl Viro 	return PAGE_SIZE - offset_in_page(data);
94b530cc79SEric Van Hensbergen }
95b530cc79SEric Van Hensbergen 
96ee443996SEric Van Hensbergen /**
97ee443996SEric Van Hensbergen  * p9_virtio_close - reclaim resources of a channel
980e15597eSAbhishek Kulkarni  * @client: client instance
99ee443996SEric Van Hensbergen  *
100ee443996SEric Van Hensbergen  * This reclaims a channel by freeing its resources and
1018ab1784dSZheng Yongjun  * resetting its inuse flag.
102ee443996SEric Van Hensbergen  *
103ee443996SEric Van Hensbergen  */
104ee443996SEric Van Hensbergen 
1058b81ef58SEric Van Hensbergen static void p9_virtio_close(struct p9_client *client)
106b530cc79SEric Van Hensbergen {
1078b81ef58SEric Van Hensbergen 	struct virtio_chan *chan = client->trans;
108b530cc79SEric Van Hensbergen 
109c1549497SJosef 'Jeff' Sipek 	mutex_lock(&virtio_9p_lock);
110fb786100SAneesh Kumar K.V 	if (chan)
111b530cc79SEric Van Hensbergen 		chan->inuse = false;
112c1549497SJosef 'Jeff' Sipek 	mutex_unlock(&virtio_9p_lock);
113b530cc79SEric Van Hensbergen }
114b530cc79SEric Van Hensbergen 
115ee443996SEric Van Hensbergen /**
116ee443996SEric Van Hensbergen  * req_done - callback which signals activity from the server
117ee443996SEric Van Hensbergen  * @vq: virtio queue activity was received on
118ee443996SEric Van Hensbergen  *
119ee443996SEric Van Hensbergen  * This notifies us that the server has triggered some activity
120ee443996SEric Van Hensbergen  * on the virtio channel - most likely a response to a request we
121ee443996SEric Van Hensbergen  * sent.  Figure out which requests now have responses and wake up
122ee443996SEric Van Hensbergen  * those threads.
123ee443996SEric Van Hensbergen  *
124ee443996SEric Van Hensbergen  * Bugs: could do with some additional sanity checking, but appears to work.
125ee443996SEric Van Hensbergen  *
126ee443996SEric Van Hensbergen  */
127ee443996SEric Van Hensbergen 
128e2735b77SEric Van Hensbergen static void req_done(struct virtqueue *vq)
129b530cc79SEric Van Hensbergen {
130e2735b77SEric Van Hensbergen 	struct virtio_chan *chan = vq->vdev->priv;
131e2735b77SEric Van Hensbergen 	unsigned int len;
132e2735b77SEric Van Hensbergen 	struct p9_req_t *req;
13331934da8Sjiangyiwen 	bool need_wakeup = false;
134419b3956SVenkateswararao Jujjuri (JV) 	unsigned long flags;
135b530cc79SEric Van Hensbergen 
1365d385153SJoe Perches 	p9_debug(P9_DEBUG_TRANS, ": request done\n");
13791b8534fSEric Van Hensbergen 
138419b3956SVenkateswararao Jujjuri (JV) 	spin_lock_irqsave(&chan->lock, flags);
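	/* Drain every completed buffer; the wake_up() itself is deferred
	 * until chan->lock has been dropped.
	 */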
13931934da8Sjiangyiwen 	while ((req = virtqueue_get_buf(chan->vq, &len)) != NULL) {
14031934da8Sjiangyiwen 		if (!chan->ring_bufs_avail) {
14152f44e0dSVenkateswararao Jujjuri (JV) 			chan->ring_bufs_avail = 1;
14231934da8Sjiangyiwen 			need_wakeup = true;
14331934da8Sjiangyiwen 		}
14431934da8Sjiangyiwen 
145f984579aSTomas Bortoli 		if (len) {
146523adb6cSDominique Martinet 			req->rc.size = len;
1472b6e72edSDominique Martinet 			p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
148e2735b77SEric Van Hensbergen 		}
149f984579aSTomas Bortoli 	}
15031934da8Sjiangyiwen 	spin_unlock_irqrestore(&chan->lock, flags);
15131934da8Sjiangyiwen 	/* Wakeup if anyone waiting for VirtIO ring space. */
15231934da8Sjiangyiwen 	if (need_wakeup)
15331934da8Sjiangyiwen 		wake_up(chan->vc_wq);
154b530cc79SEric Van Hensbergen }
155b530cc79SEric Van Hensbergen 
156ee443996SEric Van Hensbergen /**
157ee443996SEric Van Hensbergen  * pack_sg_list - pack a scatter gather list from a linear buffer
158ee443996SEric Van Hensbergen  * @sg: scatter/gather list to pack into
159ee443996SEric Van Hensbergen  * @start: which segment of the sg_list to start at
160ee443996SEric Van Hensbergen  * @limit: maximum segment to pack data to
161ee443996SEric Van Hensbergen  * @data: data to pack into scatter/gather list
162ee443996SEric Van Hensbergen  * @count: amount of data to pack into the scatter/gather list
163ee443996SEric Van Hensbergen  *
164ee443996SEric Van Hensbergen  * sg_lists have multiple segments of various sizes.  This will pack
165ee443996SEric Van Hensbergen  * arbitrary data into an existing scatter gather list, segmenting the
166ee443996SEric Van Hensbergen  * data as necessary within constraints.
167ee443996SEric Van Hensbergen  *
168ee443996SEric Van Hensbergen  */
169ee443996SEric Van Hensbergen 
170abfa034eSAneesh Kumar K.V static int pack_sg_list(struct scatterlist *sg, int start,
171abfa034eSAneesh Kumar K.V 			int limit, char *data, int count)
172e2735b77SEric Van Hensbergen {
173e2735b77SEric Van Hensbergen 	int s;
174e2735b77SEric Van Hensbergen 	int index = start;
175e2735b77SEric Van Hensbergen 
176e2735b77SEric Van Hensbergen 	while (count) {
177e2735b77SEric Van Hensbergen 		s = rest_of_page(data);
178e2735b77SEric Van Hensbergen 		if (s > count)
179e2735b77SEric Van Hensbergen 			s = count;
18023cba9cbSjiangyiwen 		BUG_ON(index >= limit);
1810b36f1adSRusty Russell 		/* Make sure we don't terminate early. */
1820b36f1adSRusty Russell 		sg_unmark_end(&sg[index]);
183e2735b77SEric Van Hensbergen 		sg_set_buf(&sg[index++], data, s);
184e2735b77SEric Van Hensbergen 		count -= s;
185e2735b77SEric Van Hensbergen 		data += s;
186e2735b77SEric Van Hensbergen 	}
1870b36f1adSRusty Russell 	if (index-start)
1880b36f1adSRusty Russell 		sg_mark_end(&sg[index - 1]);
189e2735b77SEric Van Hensbergen 	return index-start;
190e2735b77SEric Van Hensbergen }
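/*
 * Illustrative use (buf and its length are hypothetical, not a call made by
 * this driver as-is): packing a 100-byte kernel buffer starting at sg index 0
 * normally consumes one entry (two if the buffer straddles a page boundary)
 * and returns the number of entries used:
 *
 *	n = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, buf, 100);
 */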
191e2735b77SEric Van Hensbergen 
19291b8534fSEric Van Hensbergen /* We don't currently allow canceling of virtio requests */
19391b8534fSEric Van Hensbergen static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
19491b8534fSEric Van Hensbergen {
19591b8534fSEric Van Hensbergen 	return 1;
19691b8534fSEric Van Hensbergen }
19791b8534fSEric Van Hensbergen 
198728356deSTomas Bortoli /* Reply won't come, so drop req ref */
199728356deSTomas Bortoli static int p9_virtio_cancelled(struct p9_client *client, struct p9_req_t *req)
200728356deSTomas Bortoli {
2018b11ff09SKent Overstreet 	p9_req_put(client, req);
202728356deSTomas Bortoli 	return 0;
203728356deSTomas Bortoli }
204728356deSTomas Bortoli 
205ee443996SEric Van Hensbergen /**
2064038866dSVenkateswararao Jujjuri (JV)  * pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer,
2074038866dSVenkateswararao Jujjuri (JV)  * this takes a list of pages.
2084038866dSVenkateswararao Jujjuri (JV)  * @sg: scatter/gather list to pack into
2094038866dSVenkateswararao Jujjuri (JV)  * @start: which segment of the sg_list to start at
210760b3d61SAndrew Lunn  * @limit: maximum number of pages in sg list.
2112c53040fSBen Hutchings  * @pdata: a list of pages to add into sg.
212abfa034eSAneesh Kumar K.V  * @nr_pages: number of pages to pack into the scatter/gather list
2134f3b35c1SAl Viro  * @offs: amount of data at the beginning of the first page _not_ to pack
2144038866dSVenkateswararao Jujjuri (JV)  * @count: amount of data to pack into the scatter/gather list
2154038866dSVenkateswararao Jujjuri (JV)  */
2164038866dSVenkateswararao Jujjuri (JV) static int
217abfa034eSAneesh Kumar K.V pack_sg_list_p(struct scatterlist *sg, int start, int limit,
2184f3b35c1SAl Viro 	       struct page **pdata, int nr_pages, size_t offs, int count)
2194038866dSVenkateswararao Jujjuri (JV) {
220abfa034eSAneesh Kumar K.V 	int i = 0, s;
2214f3b35c1SAl Viro 	int data_off = offs;
2224038866dSVenkateswararao Jujjuri (JV) 	int index = start;
2234038866dSVenkateswararao Jujjuri (JV) 
224abfa034eSAneesh Kumar K.V 	BUG_ON(nr_pages > (limit - start));
225abfa034eSAneesh Kumar K.V 	/*
226abfa034eSAneesh Kumar K.V 	 * If the first page doesn't start at a
227abfa034eSAneesh Kumar K.V 	 * page boundary, find the offset.
228abfa034eSAneesh Kumar K.V 	 */
229abfa034eSAneesh Kumar K.V 	while (nr_pages) {
2304f3b35c1SAl Viro 		s = PAGE_SIZE - data_off;
231abfa034eSAneesh Kumar K.V 		if (s > count)
232abfa034eSAneesh Kumar K.V 			s = count;
23323cba9cbSjiangyiwen 		BUG_ON(index >= limit);
2340b36f1adSRusty Russell 		/* Make sure we don't terminate early. */
2350b36f1adSRusty Russell 		sg_unmark_end(&sg[index]);
236abfa034eSAneesh Kumar K.V 		sg_set_page(&sg[index++], pdata[i++], s, data_off);
237abfa034eSAneesh Kumar K.V 		data_off = 0;
2384038866dSVenkateswararao Jujjuri (JV) 		count -= s;
239abfa034eSAneesh Kumar K.V 		nr_pages--;
2404038866dSVenkateswararao Jujjuri (JV) 	}
2410b36f1adSRusty Russell 
2420b36f1adSRusty Russell 	if (index-start)
2430b36f1adSRusty Russell 		sg_mark_end(&sg[index - 1]);
2444038866dSVenkateswararao Jujjuri (JV) 	return index - start;
2454038866dSVenkateswararao Jujjuri (JV) }
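/*
 * Worked example (hypothetical values, assuming 4 KiB pages): with offs = 512
 * and count = 6000 spread over two pages, the first sg entry covers bytes
 * 512..4095 of the first page (3584 bytes), the second covers bytes 0..2415
 * of the next page, and the function returns 2.
 */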
2464038866dSVenkateswararao Jujjuri (JV) 
2474038866dSVenkateswararao Jujjuri (JV) /**
24891b8534fSEric Van Hensbergen  * p9_virtio_request - issue a request
2490e15597eSAbhishek Kulkarni  * @client: client instance issuing the request
2500e15597eSAbhishek Kulkarni  * @req: request to be issued
251ee443996SEric Van Hensbergen  *
252ee443996SEric Van Hensbergen  */
253ee443996SEric Van Hensbergen 
254e2735b77SEric Van Hensbergen static int
25591b8534fSEric Van Hensbergen p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
256e2735b77SEric Van Hensbergen {
257abfa034eSAneesh Kumar K.V 	int err;
2580b36f1adSRusty Russell 	int in, out, out_sgs, in_sgs;
259419b3956SVenkateswararao Jujjuri (JV) 	unsigned long flags;
260abfa034eSAneesh Kumar K.V 	struct virtio_chan *chan = client->trans;
2610b36f1adSRusty Russell 	struct scatterlist *sgs[2];
262e2735b77SEric Van Hensbergen 
2635d385153SJoe Perches 	p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n");
264e2735b77SEric Van Hensbergen 
2651a4f69efSDominique Martinet 	WRITE_ONCE(req->status, REQ_STATUS_SENT);
266abfa034eSAneesh Kumar K.V req_retry:
267419b3956SVenkateswararao Jujjuri (JV) 	spin_lock_irqsave(&chan->lock, flags);
2684038866dSVenkateswararao Jujjuri (JV) 
2690b36f1adSRusty Russell 	out_sgs = in_sgs = 0;
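	/* The request and its reply share chan->sg: "out" entries are packed
	 * first, with the "in" entries immediately after them.
	 */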
2704038866dSVenkateswararao Jujjuri (JV) 	/* Handle out VirtIO ring buffers */
271abfa034eSAneesh Kumar K.V 	out = pack_sg_list(chan->sg, 0,
272523adb6cSDominique Martinet 			   VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);
2730b36f1adSRusty Russell 	if (out)
2740b36f1adSRusty Russell 		sgs[out_sgs++] = chan->sg;
2754038866dSVenkateswararao Jujjuri (JV) 
276abfa034eSAneesh Kumar K.V 	in = pack_sg_list(chan->sg, out,
277523adb6cSDominique Martinet 			  VIRTQUEUE_NUM, req->rc.sdata, req->rc.capacity);
2780b36f1adSRusty Russell 	if (in)
2790b36f1adSRusty Russell 		sgs[out_sgs + in_sgs++] = chan->sg + out;
280e2735b77SEric Van Hensbergen 
281474fe9f7SAl Viro 	err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req,
282f96fde41SRusty Russell 				GFP_ATOMIC);
283419b3956SVenkateswararao Jujjuri (JV) 	if (err < 0) {
28452f44e0dSVenkateswararao Jujjuri (JV) 		if (err == -ENOSPC) {
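			/* The ring is full: wait for req_done() to report
			 * free descriptors, then rebuild and resubmit.
			 */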
28552f44e0dSVenkateswararao Jujjuri (JV) 			chan->ring_bufs_avail = 0;
28652f44e0dSVenkateswararao Jujjuri (JV) 			spin_unlock_irqrestore(&chan->lock, flags);
2879523feacSTuomas Tynkkynen 			err = wait_event_killable(*chan->vc_wq,
28852f44e0dSVenkateswararao Jujjuri (JV) 						  chan->ring_bufs_avail);
28952f44e0dSVenkateswararao Jujjuri (JV) 			if (err  == -ERESTARTSYS)
29052f44e0dSVenkateswararao Jujjuri (JV) 				return err;
29152f44e0dSVenkateswararao Jujjuri (JV) 
2925d385153SJoe Perches 			p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
293abfa034eSAneesh Kumar K.V 			goto req_retry;
294abfa034eSAneesh Kumar K.V 		} else {
295abfa034eSAneesh Kumar K.V 			spin_unlock_irqrestore(&chan->lock, flags);
2965d385153SJoe Perches 			p9_debug(P9_DEBUG_TRANS,
2970b36f1adSRusty Russell 				 "virtio rpc add_sgs returned failure\n");
298abfa034eSAneesh Kumar K.V 			return -EIO;
299abfa034eSAneesh Kumar K.V 		}
300abfa034eSAneesh Kumar K.V 	}
301abfa034eSAneesh Kumar K.V 	virtqueue_kick(chan->vq);
302abfa034eSAneesh Kumar K.V 	spin_unlock_irqrestore(&chan->lock, flags);
303abfa034eSAneesh Kumar K.V 
3045d385153SJoe Perches 	p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
305abfa034eSAneesh Kumar K.V 	return 0;
306abfa034eSAneesh Kumar K.V }
307abfa034eSAneesh Kumar K.V 
308abfa034eSAneesh Kumar K.V static int p9_get_mapped_pages(struct virtio_chan *chan,
3094f3b35c1SAl Viro 			       struct page ***pages,
3104f3b35c1SAl Viro 			       struct iov_iter *data,
3114f3b35c1SAl Viro 			       int count,
3124f3b35c1SAl Viro 			       size_t *offs,
3134f3b35c1SAl Viro 			       int *need_drop)
314abfa034eSAneesh Kumar K.V {
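	/*
	 * Returns the number of bytes covered by *pages (which may be less
	 * than @count).  *offs is set to the offset of the data within the
	 * first page, and *need_drop tells the caller whether the pages were
	 * pinned and must later be released with p9_release_pages().
	 */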
3154f3b35c1SAl Viro 	int nr_pages;
316abfa034eSAneesh Kumar K.V 	int err;
3174f3b35c1SAl Viro 
3184f3b35c1SAl Viro 	if (!iov_iter_count(data))
3194f3b35c1SAl Viro 		return 0;
3204f3b35c1SAl Viro 
3212cbfdf4dSMarc Zyngier 	if (!iov_iter_is_kvec(data)) {
3224f3b35c1SAl Viro 		int n;
323abfa034eSAneesh Kumar K.V 		/*
324abfa034eSAneesh Kumar K.V 		 * We allow only p9_max_pages to be pinned. Wait here for
325abfa034eSAneesh Kumar K.V 		 * other zc requests to finish and release their pages.
326abfa034eSAneesh Kumar K.V 		 */
327abfa034eSAneesh Kumar K.V 		if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
3289523feacSTuomas Tynkkynen 			err = wait_event_killable(vp_wq,
329abfa034eSAneesh Kumar K.V 			      (atomic_read(&vp_pinned) < chan->p9_max_pages));
330abfa034eSAneesh Kumar K.V 			if (err == -ERESTARTSYS)
331abfa034eSAneesh Kumar K.V 				return err;
332abfa034eSAneesh Kumar K.V 		}
3337f024647SAl Viro 		n = iov_iter_get_pages_alloc2(data, pages, count, offs);
3344f3b35c1SAl Viro 		if (n < 0)
3354f3b35c1SAl Viro 			return n;
3364f3b35c1SAl Viro 		*need_drop = 1;
3374f3b35c1SAl Viro 		nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE);
338abfa034eSAneesh Kumar K.V 		atomic_add(nr_pages, &vp_pinned);
3394f3b35c1SAl Viro 		return n;
340abfa034eSAneesh Kumar K.V 	} else {
341abfa034eSAneesh Kumar K.V 		/* kernel buffer, no need to pin pages */
3424f3b35c1SAl Viro 		int index;
3434f3b35c1SAl Viro 		size_t len;
3444f3b35c1SAl Viro 		void *p;
3454f3b35c1SAl Viro 
3464f3b35c1SAl Viro 		/* we'd already checked that it's non-empty */
3474f3b35c1SAl Viro 		while (1) {
3484f3b35c1SAl Viro 			len = iov_iter_single_seg_count(data);
3494f3b35c1SAl Viro 			if (likely(len)) {
3504f3b35c1SAl Viro 				p = data->kvec->iov_base + data->iov_offset;
3514f3b35c1SAl Viro 				break;
3524f3b35c1SAl Viro 			}
3534f3b35c1SAl Viro 			iov_iter_advance(data, 0);
3544f3b35c1SAl Viro 		}
3554f3b35c1SAl Viro 		if (len > count)
3564f3b35c1SAl Viro 			len = count;
3574f3b35c1SAl Viro 
3584f3b35c1SAl Viro 		nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) -
3594f3b35c1SAl Viro 			   (unsigned long)p / PAGE_SIZE;
3604f3b35c1SAl Viro 
3616da2ec56SKees Cook 		*pages = kmalloc_array(nr_pages, sizeof(struct page *),
3626da2ec56SKees Cook 				       GFP_NOFS);
3634f3b35c1SAl Viro 		if (!*pages)
3644f3b35c1SAl Viro 			return -ENOMEM;
3654f3b35c1SAl Viro 
3664f3b35c1SAl Viro 		*need_drop = 0;
367222e4adeSAl Viro 		p -= (*offs = offset_in_page(p));
3684f3b35c1SAl Viro 		for (index = 0; index < nr_pages; index++) {
3694f3b35c1SAl Viro 			if (is_vmalloc_addr(p))
3704f3b35c1SAl Viro 				(*pages)[index] = vmalloc_to_page(p);
371b6f52ae2SRichard Yao 			else
3724f3b35c1SAl Viro 				(*pages)[index] = kmap_to_page(p);
3734f3b35c1SAl Viro 			p += PAGE_SIZE;
374abfa034eSAneesh Kumar K.V 		}
3757f024647SAl Viro 		iov_iter_advance(data, len);
3764f3b35c1SAl Viro 		return len;
377abfa034eSAneesh Kumar K.V 	}
378abfa034eSAneesh Kumar K.V }
379abfa034eSAneesh Kumar K.V 
380f615625aSAl Viro static void handle_rerror(struct p9_req_t *req, int in_hdr_len,
381f615625aSAl Viro 			  size_t offs, struct page **pages)
382f615625aSAl Viro {
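	/*
	 * Zero-copy reads land the payload in @pages, but the client decodes
	 * RERROR replies from rc.sdata; copy the error string back into the
	 * static buffer so the reply can still be parsed.
	 */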
383f615625aSAl Viro 	unsigned size, n;
384f615625aSAl Viro 	void *to = req->rc.sdata + in_hdr_len;
385f615625aSAl Viro 
386f615625aSAl Viro 	// Fits entirely into the static data?  Nothing to do.
38713ade4acSDominique Martinet 	if (req->rc.size < in_hdr_len || !pages)
388f615625aSAl Viro 		return;
389f615625aSAl Viro 
390f615625aSAl Viro 	// Really long error message?  Tough, truncate the reply.  Might get
391f615625aSAl Viro 	// rejected (we can't be arsed to adjust the size encoded in header,
392f615625aSAl Viro 	// or string size for that matter), but it wouldn't be anything valid
393f615625aSAl Viro 	// anyway.
394f615625aSAl Viro 	if (unlikely(req->rc.size > P9_ZC_HDR_SZ))
395f615625aSAl Viro 		req->rc.size = P9_ZC_HDR_SZ;
396f615625aSAl Viro 
397f615625aSAl Viro 	// data won't span more than two pages
398f615625aSAl Viro 	size = req->rc.size - in_hdr_len;
399f615625aSAl Viro 	n = PAGE_SIZE - offs;
400f615625aSAl Viro 	if (size > n) {
401f615625aSAl Viro 		memcpy_from_page(to, *pages++, offs, n);
402f615625aSAl Viro 		offs = 0;
403f615625aSAl Viro 		to += n;
404f615625aSAl Viro 		size -= n;
405f615625aSAl Viro 	}
406f615625aSAl Viro 	memcpy_from_page(to, *pages, offs, size);
407f615625aSAl Viro }
408f615625aSAl Viro 
409abfa034eSAneesh Kumar K.V /**
410abfa034eSAneesh Kumar K.V  * p9_virtio_zc_request - issue a zero copy request
411abfa034eSAneesh Kumar K.V  * @client: client instance issuing the request
412abfa034eSAneesh Kumar K.V  * @req: request to be issued
413c7ebbae7Spiaojun  * @uidata: user buffer that should be used for zero copy read
414c7ebbae7Spiaojun  * @uodata: user buffer that should be used for zero copy write
415abfa034eSAneesh Kumar K.V  * @inlen: read buffer size
4164a026da9SSun Lianwen  * @outlen: write buffer size
4174a026da9SSun Lianwen  * @in_hdr_len: read header size. This is the size of the response protocol data.
418abfa034eSAneesh Kumar K.V  *
419abfa034eSAneesh Kumar K.V  */
420abfa034eSAneesh Kumar K.V static int
421abfa034eSAneesh Kumar K.V p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
4224f3b35c1SAl Viro 		     struct iov_iter *uidata, struct iov_iter *uodata,
4234f3b35c1SAl Viro 		     int inlen, int outlen, int in_hdr_len)
424abfa034eSAneesh Kumar K.V {
4250b36f1adSRusty Russell 	int in, out, err, out_sgs, in_sgs;
426abfa034eSAneesh Kumar K.V 	unsigned long flags;
427abfa034eSAneesh Kumar K.V 	int in_nr_pages = 0, out_nr_pages = 0;
428abfa034eSAneesh Kumar K.V 	struct page **in_pages = NULL, **out_pages = NULL;
429abfa034eSAneesh Kumar K.V 	struct virtio_chan *chan = client->trans;
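	/* At most four sg lists: out header, out payload pages, in header,
	 * in payload pages.
	 */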
4300b36f1adSRusty Russell 	struct scatterlist *sgs[4];
4314a73edabSDominique Martinet 	size_t offs = 0;
4324f3b35c1SAl Viro 	int need_drop = 0;
433728356deSTomas Bortoli 	int kicked = 0;
434abfa034eSAneesh Kumar K.V 
4355d385153SJoe Perches 	p9_debug(P9_DEBUG_TRANS, "virtio request\n");
436abfa034eSAneesh Kumar K.V 
437abfa034eSAneesh Kumar K.V 	if (uodata) {
438d28c756cSChirantan Ekbote 		__le32 sz;
4394f3b35c1SAl Viro 		int n = p9_get_mapped_pages(chan, &out_pages, uodata,
4404f3b35c1SAl Viro 					    outlen, &offs, &need_drop);
441728356deSTomas Bortoli 		if (n < 0) {
442728356deSTomas Bortoli 			err = n;
443728356deSTomas Bortoli 			goto err_out;
444728356deSTomas Bortoli 		}
4454f3b35c1SAl Viro 		out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
4464f3b35c1SAl Viro 		if (n != outlen) {
4474f3b35c1SAl Viro 			__le32 v = cpu_to_le32(n);
448523adb6cSDominique Martinet 			memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4);
4494f3b35c1SAl Viro 			outlen = n;
450abfa034eSAneesh Kumar K.V 		}
451d28c756cSChirantan Ekbote 		/* The size field of the message must include the length of the
452d28c756cSChirantan Ekbote 		 * header and the length of the data.  We didn't actually know
453d28c756cSChirantan Ekbote 		 * the length of the data until this point so add it in now.
454d28c756cSChirantan Ekbote 		 */
455523adb6cSDominique Martinet 		sz = cpu_to_le32(req->tc.size + outlen);
456523adb6cSDominique Martinet 		memcpy(&req->tc.sdata[0], &sz, sizeof(sz));
4574f3b35c1SAl Viro 	} else if (uidata) {
4584f3b35c1SAl Viro 		int n = p9_get_mapped_pages(chan, &in_pages, uidata,
4594f3b35c1SAl Viro 					    inlen, &offs, &need_drop);
460728356deSTomas Bortoli 		if (n < 0) {
461728356deSTomas Bortoli 			err = n;
462728356deSTomas Bortoli 			goto err_out;
463728356deSTomas Bortoli 		}
4644f3b35c1SAl Viro 		in_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
4654f3b35c1SAl Viro 		if (n != inlen) {
4664f3b35c1SAl Viro 			__le32 v = cpu_to_le32(n);
467523adb6cSDominique Martinet 			memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4);
4684f3b35c1SAl Viro 			inlen = n;
469abfa034eSAneesh Kumar K.V 		}
470abfa034eSAneesh Kumar K.V 	}
4711a4f69efSDominique Martinet 	WRITE_ONCE(req->status, REQ_STATUS_SENT);
472abfa034eSAneesh Kumar K.V req_retry_pinned:
473abfa034eSAneesh Kumar K.V 	spin_lock_irqsave(&chan->lock, flags);
4740b36f1adSRusty Russell 
4750b36f1adSRusty Russell 	out_sgs = in_sgs = 0;
4760b36f1adSRusty Russell 
477abfa034eSAneesh Kumar K.V 	/* out data */
478abfa034eSAneesh Kumar K.V 	out = pack_sg_list(chan->sg, 0,
479523adb6cSDominique Martinet 			   VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);
480abfa034eSAneesh Kumar K.V 
4810b36f1adSRusty Russell 	if (out)
4820b36f1adSRusty Russell 		sgs[out_sgs++] = chan->sg;
4830b36f1adSRusty Russell 
4840b36f1adSRusty Russell 	if (out_pages) {
4850b36f1adSRusty Russell 		sgs[out_sgs++] = chan->sg + out;
486abfa034eSAneesh Kumar K.V 		out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
4874f3b35c1SAl Viro 				      out_pages, out_nr_pages, offs, outlen);
4880b36f1adSRusty Russell 	}
4890b36f1adSRusty Russell 
490abfa034eSAneesh Kumar K.V 	/*
491abfa034eSAneesh Kumar K.V 	 * Take care of in data
492abfa034eSAneesh Kumar K.V 	 * For example, TREAD has an in_hdr_len of 11:
493abfa034eSAneesh Kumar K.V 	 * 11 is the read/write header = PDU header (7) + IO size (4).
494abfa034eSAneesh Kumar K.V 	 * Arrange in such a way that server places header in the
4958ab1784dSZheng Yongjun 	 * allocated memory and payload onto the user buffer.
496abfa034eSAneesh Kumar K.V 	 */
497abfa034eSAneesh Kumar K.V 	in = pack_sg_list(chan->sg, out,
498523adb6cSDominique Martinet 			  VIRTQUEUE_NUM, req->rc.sdata, in_hdr_len);
4990b36f1adSRusty Russell 	if (in)
5000b36f1adSRusty Russell 		sgs[out_sgs + in_sgs++] = chan->sg + out;
5010b36f1adSRusty Russell 
5020b36f1adSRusty Russell 	if (in_pages) {
5030b36f1adSRusty Russell 		sgs[out_sgs + in_sgs++] = chan->sg + out + in;
504*f41b402dSDominique Martinet 		pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
5054f3b35c1SAl Viro 			       in_pages, in_nr_pages, offs, inlen);
5060b36f1adSRusty Russell 	}
507abfa034eSAneesh Kumar K.V 
5080b36f1adSRusty Russell 	BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));
509474fe9f7SAl Viro 	err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req,
510f96fde41SRusty Russell 				GFP_ATOMIC);
511abfa034eSAneesh Kumar K.V 	if (err < 0) {
512abfa034eSAneesh Kumar K.V 		if (err == -ENOSPC) {
513abfa034eSAneesh Kumar K.V 			chan->ring_bufs_avail = 0;
514abfa034eSAneesh Kumar K.V 			spin_unlock_irqrestore(&chan->lock, flags);
5159523feacSTuomas Tynkkynen 			err = wait_event_killable(*chan->vc_wq,
516abfa034eSAneesh Kumar K.V 						  chan->ring_bufs_avail);
517abfa034eSAneesh Kumar K.V 			if (err  == -ERESTARTSYS)
518abfa034eSAneesh Kumar K.V 				goto err_out;
519abfa034eSAneesh Kumar K.V 
5205d385153SJoe Perches 			p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
521316ad550SVenkateswararao Jujjuri (JV) 			goto req_retry_pinned;
52252f44e0dSVenkateswararao Jujjuri (JV) 		} else {
523419b3956SVenkateswararao Jujjuri (JV) 			spin_unlock_irqrestore(&chan->lock, flags);
5245d385153SJoe Perches 			p9_debug(P9_DEBUG_TRANS,
5250b36f1adSRusty Russell 				 "virtio rpc add_sgs returned failure\n");
526abfa034eSAneesh Kumar K.V 			err = -EIO;
527abfa034eSAneesh Kumar K.V 			goto err_out;
528e2735b77SEric Van Hensbergen 		}
52952f44e0dSVenkateswararao Jujjuri (JV) 	}
530dc3f5e68SMichael S. Tsirkin 	virtqueue_kick(chan->vq);
531419b3956SVenkateswararao Jujjuri (JV) 	spin_unlock_irqrestore(&chan->lock, flags);
532728356deSTomas Bortoli 	kicked = 1;
5335d385153SJoe Perches 	p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
5341a4f69efSDominique Martinet 	err = wait_event_killable(req->wq,
5351a4f69efSDominique Martinet 			          READ_ONCE(req->status) >= REQ_STATUS_RCVD);
536f615625aSAl Viro 	// RERROR needs reply (== error string) in static data
5371a4f69efSDominique Martinet 	if (READ_ONCE(req->status) == REQ_STATUS_RCVD &&
538f615625aSAl Viro 	    unlikely(req->rc.sdata[4] == P9_RERROR))
539f615625aSAl Viro 		handle_rerror(req, in_hdr_len, offs, in_pages);
540f615625aSAl Viro 
541abfa034eSAneesh Kumar K.V 	/*
542abfa034eSAneesh Kumar K.V 	 * Non kernel buffers are pinned, unpin them
543abfa034eSAneesh Kumar K.V 	 */
544abfa034eSAneesh Kumar K.V err_out:
5454f3b35c1SAl Viro 	if (need_drop) {
546abfa034eSAneesh Kumar K.V 		if (in_pages) {
547abfa034eSAneesh Kumar K.V 			p9_release_pages(in_pages, in_nr_pages);
548abfa034eSAneesh Kumar K.V 			atomic_sub(in_nr_pages, &vp_pinned);
549abfa034eSAneesh Kumar K.V 		}
550abfa034eSAneesh Kumar K.V 		if (out_pages) {
551abfa034eSAneesh Kumar K.V 			p9_release_pages(out_pages, out_nr_pages);
552abfa034eSAneesh Kumar K.V 			atomic_sub(out_nr_pages, &vp_pinned);
553abfa034eSAneesh Kumar K.V 		}
554abfa034eSAneesh Kumar K.V 		/* wakeup anybody waiting for slots to pin pages */
555abfa034eSAneesh Kumar K.V 		wake_up(&vp_wq);
556abfa034eSAneesh Kumar K.V 	}
5571b8553c0SVegard Nossum 	kvfree(in_pages);
5581b8553c0SVegard Nossum 	kvfree(out_pages);
559728356deSTomas Bortoli 	if (!kicked) {
560728356deSTomas Bortoli 		/* reply won't come */
5618b11ff09SKent Overstreet 		p9_req_put(client, req);
562728356deSTomas Bortoli 	}
563abfa034eSAneesh Kumar K.V 	return err;
564e2735b77SEric Van Hensbergen }
565e2735b77SEric Van Hensbergen 
56686c84373SAneesh Kumar K.V static ssize_t p9_mount_tag_show(struct device *dev,
56786c84373SAneesh Kumar K.V 				struct device_attribute *attr, char *buf)
56886c84373SAneesh Kumar K.V {
56986c84373SAneesh Kumar K.V 	struct virtio_chan *chan;
57086c84373SAneesh Kumar K.V 	struct virtio_device *vdev;
571edcd9d97Spiaojun 	int tag_len;
57286c84373SAneesh Kumar K.V 
57386c84373SAneesh Kumar K.V 	vdev = dev_to_virtio(dev);
57486c84373SAneesh Kumar K.V 	chan = vdev->priv;
575edcd9d97Spiaojun 	tag_len = strlen(chan->tag);
57686c84373SAneesh Kumar K.V 
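	/* Copy the tag together with its terminating NUL. */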
577edcd9d97Spiaojun 	memcpy(buf, chan->tag, tag_len + 1);
578179a5bc4SAndrey Ryabinin 
579edcd9d97Spiaojun 	return tag_len + 1;
58086c84373SAneesh Kumar K.V }
58186c84373SAneesh Kumar K.V 
58286c84373SAneesh Kumar K.V static DEVICE_ATTR(mount_tag, 0444, p9_mount_tag_show, NULL);
58386c84373SAneesh Kumar K.V 
584ee443996SEric Van Hensbergen /**
585ee443996SEric Van Hensbergen  * p9_virtio_probe - probe for existence of 9P virtio channels
586ee443996SEric Van Hensbergen  * @vdev: virtio device to probe
587ee443996SEric Van Hensbergen  *
58837c1209dSAneesh Kumar K.V  * This probes for existing virtio channels.
589ee443996SEric Van Hensbergen  *
590ee443996SEric Van Hensbergen  */
591ee443996SEric Van Hensbergen 
592e2735b77SEric Van Hensbergen static int p9_virtio_probe(struct virtio_device *vdev)
593b530cc79SEric Van Hensbergen {
59497ee9b02SAneesh Kumar K.V 	__u16 tag_len;
59597ee9b02SAneesh Kumar K.V 	char *tag;
596b530cc79SEric Van Hensbergen 	int err;
597b530cc79SEric Van Hensbergen 	struct virtio_chan *chan;
598b530cc79SEric Van Hensbergen 
5997754f53eSMichael S. Tsirkin 	if (!vdev->config->get) {
6007754f53eSMichael S. Tsirkin 		dev_err(&vdev->dev, "%s failure: config access disabled\n",
6017754f53eSMichael S. Tsirkin 			__func__);
6027754f53eSMichael S. Tsirkin 		return -EINVAL;
6037754f53eSMichael S. Tsirkin 	}
6047754f53eSMichael S. Tsirkin 
60537c1209dSAneesh Kumar K.V 	chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL);
60637c1209dSAneesh Kumar K.V 	if (!chan) {
6075d385153SJoe Perches 		pr_err("Failed to allocate virtio 9P channel\n");
608b530cc79SEric Van Hensbergen 		err = -ENOMEM;
609b530cc79SEric Van Hensbergen 		goto fail;
610b530cc79SEric Van Hensbergen 	}
611b530cc79SEric Van Hensbergen 
612e2735b77SEric Van Hensbergen 	chan->vdev = vdev;
613e2735b77SEric Van Hensbergen 
614e2735b77SEric Van Hensbergen 	/* We expect one virtqueue, for requests. */
615d2a7dddaSMichael S. Tsirkin 	chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
616e2735b77SEric Van Hensbergen 	if (IS_ERR(chan->vq)) {
617e2735b77SEric Van Hensbergen 		err = PTR_ERR(chan->vq);
61892aef467SJean-Philippe Brucker 		goto out_free_chan;
619b530cc79SEric Van Hensbergen 	}
620e2735b77SEric Van Hensbergen 	chan->vq->vdev->priv = chan;
621e2735b77SEric Van Hensbergen 	spin_lock_init(&chan->lock);
622b530cc79SEric Van Hensbergen 
623e2735b77SEric Van Hensbergen 	sg_init_table(chan->sg, VIRTQUEUE_NUM);
624b530cc79SEric Van Hensbergen 
625b530cc79SEric Van Hensbergen 	chan->inuse = false;
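	/* The mount tag lives in the device's virtio config space; it is what
	 * p9_virtio_create() matches a mount's device name against, and it is
	 * also exported through the mount_tag sysfs attribute.
	 */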
62697ee9b02SAneesh Kumar K.V 	if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) {
627855e0c52SRusty Russell 		virtio_cread(vdev, struct virtio_9p_config, tag_len, &tag_len);
62897ee9b02SAneesh Kumar K.V 	} else {
62997ee9b02SAneesh Kumar K.V 		err = -EINVAL;
63097ee9b02SAneesh Kumar K.V 		goto out_free_vq;
63197ee9b02SAneesh Kumar K.V 	}
632edcd9d97Spiaojun 	tag = kzalloc(tag_len + 1, GFP_KERNEL);
63397ee9b02SAneesh Kumar K.V 	if (!tag) {
63497ee9b02SAneesh Kumar K.V 		err = -ENOMEM;
63597ee9b02SAneesh Kumar K.V 		goto out_free_vq;
63697ee9b02SAneesh Kumar K.V 	}
637855e0c52SRusty Russell 
638855e0c52SRusty Russell 	virtio_cread_bytes(vdev, offsetof(struct virtio_9p_config, tag),
63997ee9b02SAneesh Kumar K.V 			   tag, tag_len);
64097ee9b02SAneesh Kumar K.V 	chan->tag = tag;
64186c84373SAneesh Kumar K.V 	err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
64286c84373SAneesh Kumar K.V 	if (err) {
64352f44e0dSVenkateswararao Jujjuri (JV) 		goto out_free_tag;
64486c84373SAneesh Kumar K.V 	}
64552f44e0dSVenkateswararao Jujjuri (JV) 	chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
64652f44e0dSVenkateswararao Jujjuri (JV) 	if (!chan->vc_wq) {
64752f44e0dSVenkateswararao Jujjuri (JV) 		err = -ENOMEM;
648f997ea3bSXie Yongji 		goto out_remove_file;
64952f44e0dSVenkateswararao Jujjuri (JV) 	}
65052f44e0dSVenkateswararao Jujjuri (JV) 	init_waitqueue_head(chan->vc_wq);
65152f44e0dSVenkateswararao Jujjuri (JV) 	chan->ring_bufs_avail = 1;
65268da9ba4SVenkateswararao Jujjuri (JV) 	/* Ceiling limit to avoid denial of service attacks */
65368da9ba4SVenkateswararao Jujjuri (JV) 	chan->p9_max_pages = nr_free_buffer_pages()/4;
65452f44e0dSVenkateswararao Jujjuri (JV) 
65564b4cc39SMichael S. Tsirkin 	virtio_device_ready(vdev);
65664b4cc39SMichael S. Tsirkin 
65737c1209dSAneesh Kumar K.V 	mutex_lock(&virtio_9p_lock);
65837c1209dSAneesh Kumar K.V 	list_add_tail(&chan->chan_list, &virtio_chan_list);
65937c1209dSAneesh Kumar K.V 	mutex_unlock(&virtio_9p_lock);
660e0d6cb9cSMichael Marineau 
661e0d6cb9cSMichael Marineau 	/* Let udev rules use the new mount_tag attribute. */
662e0d6cb9cSMichael Marineau 	kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
663e0d6cb9cSMichael Marineau 
664b530cc79SEric Van Hensbergen 	return 0;
665b530cc79SEric Van Hensbergen 
666f997ea3bSXie Yongji out_remove_file:
667f997ea3bSXie Yongji 	sysfs_remove_file(&vdev->dev.kobj, &dev_attr_mount_tag.attr);
66852f44e0dSVenkateswararao Jujjuri (JV) out_free_tag:
66952f44e0dSVenkateswararao Jujjuri (JV) 	kfree(tag);
670e2735b77SEric Van Hensbergen out_free_vq:
671d2a7dddaSMichael S. Tsirkin 	vdev->config->del_vqs(vdev);
67292aef467SJean-Philippe Brucker out_free_chan:
67337c1209dSAneesh Kumar K.V 	kfree(chan);
674b530cc79SEric Van Hensbergen fail:
675b530cc79SEric Van Hensbergen 	return err;
676b530cc79SEric Van Hensbergen }
677b530cc79SEric Van Hensbergen 
678ee443996SEric Van Hensbergen 
679ee443996SEric Van Hensbergen /**
680ee443996SEric Van Hensbergen  * p9_virtio_create - allocate a new virtio channel
6818b81ef58SEric Van Hensbergen  * @client: client instance invoking this transport
682ee443996SEric Van Hensbergen  * @devname: string identifying the channel to connect to (unused)
683ee443996SEric Van Hensbergen  * @args: args passed from sys_mount() for per-transport options (unused)
684ee443996SEric Van Hensbergen  *
685ee443996SEric Van Hensbergen  * This sets up a transport channel for 9p communication.  The channel is
686b530cc79SEric Van Hensbergen  * selected by matching @devname against each discovered channel's mount tag.
688b530cc79SEric Van Hensbergen  * We use a simple reference count mechanism to ensure that only a single
689ee443996SEric Van Hensbergen  * mount has a channel open at a time.
690ee443996SEric Van Hensbergen  *
691ee443996SEric Van Hensbergen  */
692ee443996SEric Van Hensbergen 
6938b81ef58SEric Van Hensbergen static int
6948b81ef58SEric Van Hensbergen p9_virtio_create(struct p9_client *client, const char *devname, char *args)
695b530cc79SEric Van Hensbergen {
69637c1209dSAneesh Kumar K.V 	struct virtio_chan *chan;
697c1a7c226SAneesh Kumar K.V 	int ret = -ENOENT;
69837c1209dSAneesh Kumar K.V 	int found = 0;
699b530cc79SEric Van Hensbergen 
70010aa1452STomas Bortoli 	if (devname == NULL)
70110aa1452STomas Bortoli 		return -EINVAL;
70210aa1452STomas Bortoli 
703c1549497SJosef 'Jeff' Sipek 	mutex_lock(&virtio_9p_lock);
70437c1209dSAneesh Kumar K.V 	list_for_each_entry(chan, &virtio_chan_list, chan_list) {
705edcd9d97Spiaojun 		if (!strcmp(devname, chan->tag)) {
706f75580c4SAneesh Kumar K.V 			if (!chan->inuse) {
707b530cc79SEric Van Hensbergen 				chan->inuse = true;
70837c1209dSAneesh Kumar K.V 				found = 1;
709b530cc79SEric Van Hensbergen 				break;
710f75580c4SAneesh Kumar K.V 			}
711c1a7c226SAneesh Kumar K.V 			ret = -EBUSY;
712f75580c4SAneesh Kumar K.V 		}
713b530cc79SEric Van Hensbergen 	}
714c1549497SJosef 'Jeff' Sipek 	mutex_unlock(&virtio_9p_lock);
715b530cc79SEric Van Hensbergen 
71637c1209dSAneesh Kumar K.V 	if (!found) {
717c7c72c5aSAneesh Kumar K.V 		pr_err("no channels available for device %s\n", devname);
718c1a7c226SAneesh Kumar K.V 		return ret;
719b530cc79SEric Van Hensbergen 	}
720b530cc79SEric Van Hensbergen 
7218b81ef58SEric Van Hensbergen 	client->trans = (void *)chan;
722562ada61SEric Van Hensbergen 	client->status = Connected;
723fea511a6SEric Van Hensbergen 	chan->client = client;
724b530cc79SEric Van Hensbergen 
7258b81ef58SEric Van Hensbergen 	return 0;
726b530cc79SEric Van Hensbergen }
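
/*
 * Illustrative usage (hypothetical tag "share0"): this transport is chosen
 * at mount time with something like
 *
 *	mount -t 9p -o trans=virtio share0 /mnt
 */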
727b530cc79SEric Van Hensbergen 
728ee443996SEric Van Hensbergen /**
729ee443996SEric Van Hensbergen  * p9_virtio_remove - clean up resources associated with a virtio device
730ee443996SEric Van Hensbergen  * @vdev: virtio device to remove
731ee443996SEric Van Hensbergen  *
732ee443996SEric Van Hensbergen  */
733ee443996SEric Van Hensbergen 
734f3933545SEric Van Hensbergen static void p9_virtio_remove(struct virtio_device *vdev)
735f3933545SEric Van Hensbergen {
736f3933545SEric Van Hensbergen 	struct virtio_chan *chan = vdev->priv;
7378051a2a5SMichael S. Tsirkin 	unsigned long warning_time;
73837c1209dSAneesh Kumar K.V 
73937c1209dSAneesh Kumar K.V 	mutex_lock(&virtio_9p_lock);
7408051a2a5SMichael S. Tsirkin 
7418051a2a5SMichael S. Tsirkin 	/* Remove self from list so we don't get new users. */
74237c1209dSAneesh Kumar K.V 	list_del(&chan->chan_list);
7438051a2a5SMichael S. Tsirkin 	warning_time = jiffies;
7448051a2a5SMichael S. Tsirkin 
7458051a2a5SMichael S. Tsirkin 	/* Wait for existing users to close. */
7468051a2a5SMichael S. Tsirkin 	while (chan->inuse) {
74737c1209dSAneesh Kumar K.V 		mutex_unlock(&virtio_9p_lock);
7488051a2a5SMichael S. Tsirkin 		msleep(250);
7498051a2a5SMichael S. Tsirkin 		if (time_after(jiffies, warning_time + 10 * HZ)) {
7508051a2a5SMichael S. Tsirkin 			dev_emerg(&vdev->dev,
7518051a2a5SMichael S. Tsirkin 				  "p9_virtio_remove: waiting for device in use.\n");
7528051a2a5SMichael S. Tsirkin 			warning_time = jiffies;
7538051a2a5SMichael S. Tsirkin 		}
7548051a2a5SMichael S. Tsirkin 		mutex_lock(&virtio_9p_lock);
7558051a2a5SMichael S. Tsirkin 	}
7568051a2a5SMichael S. Tsirkin 
7578051a2a5SMichael S. Tsirkin 	mutex_unlock(&virtio_9p_lock);
7588051a2a5SMichael S. Tsirkin 
759d9679d00SMichael S. Tsirkin 	virtio_reset_device(vdev);
7608051a2a5SMichael S. Tsirkin 	vdev->config->del_vqs(vdev);
7618051a2a5SMichael S. Tsirkin 
76286c84373SAneesh Kumar K.V 	sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
763e0d6cb9cSMichael Marineau 	kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
76497ee9b02SAneesh Kumar K.V 	kfree(chan->tag);
76552f44e0dSVenkateswararao Jujjuri (JV) 	kfree(chan->vc_wq);
76637c1209dSAneesh Kumar K.V 	kfree(chan);
76737c1209dSAneesh Kumar K.V 
768f3933545SEric Van Hensbergen }
769f3933545SEric Van Hensbergen 
770b530cc79SEric Van Hensbergen static struct virtio_device_id id_table[] = {
771b530cc79SEric Van Hensbergen 	{ VIRTIO_ID_9P, VIRTIO_DEV_ANY_ID },
772b530cc79SEric Van Hensbergen 	{ 0 },
773b530cc79SEric Van Hensbergen };
774b530cc79SEric Van Hensbergen 
77597ee9b02SAneesh Kumar K.V static unsigned int features[] = {
77697ee9b02SAneesh Kumar K.V 	VIRTIO_9P_MOUNT_TAG,
77797ee9b02SAneesh Kumar K.V };
77897ee9b02SAneesh Kumar K.V 
779b530cc79SEric Van Hensbergen /* The standard "struct virtio_driver": */
780b530cc79SEric Van Hensbergen static struct virtio_driver p9_virtio_drv = {
78197ee9b02SAneesh Kumar K.V 	.feature_table  = features,
78297ee9b02SAneesh Kumar K.V 	.feature_table_size = ARRAY_SIZE(features),
783b530cc79SEric Van Hensbergen 	.driver.name    = KBUILD_MODNAME,
784b530cc79SEric Van Hensbergen 	.driver.owner	= THIS_MODULE,
785b530cc79SEric Van Hensbergen 	.id_table	= id_table,
786b530cc79SEric Van Hensbergen 	.probe		= p9_virtio_probe,
787f3933545SEric Van Hensbergen 	.remove		= p9_virtio_remove,
788b530cc79SEric Van Hensbergen };
789b530cc79SEric Van Hensbergen 
790b530cc79SEric Van Hensbergen static struct p9_trans_module p9_virtio_trans = {
791b530cc79SEric Van Hensbergen 	.name = "virtio",
792b530cc79SEric Van Hensbergen 	.create = p9_virtio_create,
7938b81ef58SEric Van Hensbergen 	.close = p9_virtio_close,
79491b8534fSEric Van Hensbergen 	.request = p9_virtio_request,
795dc893e19SArnd Bergmann 	.zc_request = p9_virtio_zc_request,
79691b8534fSEric Van Hensbergen 	.cancel = p9_virtio_cancel,
797728356deSTomas Bortoli 	.cancelled = p9_virtio_cancelled,
798b49d8b5dSAneesh Kumar K.V 	/*
799b49d8b5dSAneesh Kumar K.V 	 * We leave one entry for the request header and one for the response
8008ab1784dSZheng Yongjun 	 * header. We also skip one more entry to accommodate addresses
801b49d8b5dSAneesh Kumar K.V 	 * that are not at a page boundary, which can result in an extra
802b49d8b5dSAneesh Kumar K.V 	 * page in zero copy.
803b49d8b5dSAneesh Kumar K.V 	 */
804b49d8b5dSAneesh Kumar K.V 	.maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
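	/* With 4 KiB pages this works out to 125 * PAGE_SIZE = 500 KiB. */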
80501d205d9SChristian Schoenebeck 	.pooled_rbuffers = false,
806f94741fdSEric Van Hensbergen 	.def = 1,
80772029fe8STejun Heo 	.owner = THIS_MODULE,
808b530cc79SEric Van Hensbergen };
809b530cc79SEric Van Hensbergen 
810b530cc79SEric Van Hensbergen /* The standard init function */
811b530cc79SEric Van Hensbergen static int __init p9_virtio_init(void)
812b530cc79SEric Van Hensbergen {
813d4548543SYueHaibing 	int rc;
814d4548543SYueHaibing 
81537c1209dSAneesh Kumar K.V 	INIT_LIST_HEAD(&virtio_chan_list);
816b530cc79SEric Van Hensbergen 
817b530cc79SEric Van Hensbergen 	v9fs_register_trans(&p9_virtio_trans);
818d4548543SYueHaibing 	rc = register_virtio_driver(&p9_virtio_drv);
819d4548543SYueHaibing 	if (rc)
820d4548543SYueHaibing 		v9fs_unregister_trans(&p9_virtio_trans);
821d4548543SYueHaibing 
822d4548543SYueHaibing 	return rc;
823b530cc79SEric Van Hensbergen }
824b530cc79SEric Van Hensbergen 
825f3933545SEric Van Hensbergen static void __exit p9_virtio_cleanup(void)
826f3933545SEric Van Hensbergen {
827f3933545SEric Van Hensbergen 	unregister_virtio_driver(&p9_virtio_drv);
82872029fe8STejun Heo 	v9fs_unregister_trans(&p9_virtio_trans);
829f3933545SEric Van Hensbergen }
830f3933545SEric Van Hensbergen 
831b530cc79SEric Van Hensbergen module_init(p9_virtio_init);
832f3933545SEric Van Hensbergen module_exit(p9_virtio_cleanup);
8334cd82a5bSThomas Weißschuh MODULE_ALIAS_9P("virtio");
834b530cc79SEric Van Hensbergen 
835b530cc79SEric Van Hensbergen MODULE_DEVICE_TABLE(virtio, id_table);
836b530cc79SEric Van Hensbergen MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
837b530cc79SEric Van Hensbergen MODULE_DESCRIPTION("Virtio 9p Transport");
838b530cc79SEric Van Hensbergen MODULE_LICENSE("GPL");
839