1b530cc79SEric Van Hensbergen /* 2fea511a6SEric Van Hensbergen * The Virtio 9p transport driver 3b530cc79SEric Van Hensbergen * 4e2735b77SEric Van Hensbergen * This is a block based transport driver based on the lguest block driver 5e2735b77SEric Van Hensbergen * code. 6b530cc79SEric Van Hensbergen * 7fea511a6SEric Van Hensbergen * Copyright (C) 2007, 2008 Eric Van Hensbergen, IBM Corporation 8b530cc79SEric Van Hensbergen * 9b530cc79SEric Van Hensbergen * Based on virtio console driver 10b530cc79SEric Van Hensbergen * Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation 11b530cc79SEric Van Hensbergen * 12b530cc79SEric Van Hensbergen * This program is free software; you can redistribute it and/or modify 13b530cc79SEric Van Hensbergen * it under the terms of the GNU General Public License version 2 14b530cc79SEric Van Hensbergen * as published by the Free Software Foundation. 15b530cc79SEric Van Hensbergen * 16b530cc79SEric Van Hensbergen * This program is distributed in the hope that it will be useful, 17b530cc79SEric Van Hensbergen * but WITHOUT ANY WARRANTY; without even the implied warranty of 18b530cc79SEric Van Hensbergen * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19b530cc79SEric Van Hensbergen * GNU General Public License for more details. 
20b530cc79SEric Van Hensbergen * 21b530cc79SEric Van Hensbergen * You should have received a copy of the GNU General Public License 22b530cc79SEric Van Hensbergen * along with this program; if not, write to: 23b530cc79SEric Van Hensbergen * Free Software Foundation 24b530cc79SEric Van Hensbergen * 51 Franklin Street, Fifth Floor 25b530cc79SEric Van Hensbergen * Boston, MA 02111-1301 USA 26b530cc79SEric Van Hensbergen * 27b530cc79SEric Van Hensbergen */ 28b530cc79SEric Van Hensbergen 295d385153SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 305d385153SJoe Perches 31b530cc79SEric Van Hensbergen #include <linux/in.h> 32b530cc79SEric Van Hensbergen #include <linux/module.h> 33b530cc79SEric Van Hensbergen #include <linux/net.h> 34b530cc79SEric Van Hensbergen #include <linux/ipv6.h> 35b530cc79SEric Van Hensbergen #include <linux/errno.h> 36b530cc79SEric Van Hensbergen #include <linux/kernel.h> 37b530cc79SEric Van Hensbergen #include <linux/un.h> 38b530cc79SEric Van Hensbergen #include <linux/uaccess.h> 39b530cc79SEric Van Hensbergen #include <linux/inet.h> 40b530cc79SEric Van Hensbergen #include <linux/idr.h> 41b530cc79SEric Van Hensbergen #include <linux/file.h> 42b9cdc88dSWill Deacon #include <linux/highmem.h> 435a0e3ad6STejun Heo #include <linux/slab.h> 44b530cc79SEric Van Hensbergen #include <net/9p/9p.h> 45b530cc79SEric Van Hensbergen #include <linux/parser.h> 468b81ef58SEric Van Hensbergen #include <net/9p/client.h> 47b530cc79SEric Van Hensbergen #include <net/9p/transport.h> 48b530cc79SEric Van Hensbergen #include <linux/scatterlist.h> 4968da9ba4SVenkateswararao Jujjuri (JV) #include <linux/swap.h> 50b530cc79SEric Van Hensbergen #include <linux/virtio.h> 51b530cc79SEric Van Hensbergen #include <linux/virtio_9p.h> 524038866dSVenkateswararao Jujjuri (JV) #include "trans_common.h" 53b530cc79SEric Van Hensbergen 54e2735b77SEric Van Hensbergen #define VIRTQUEUE_NUM 128 55e2735b77SEric Van Hensbergen 56b530cc79SEric Van Hensbergen /* a single mutex to manage 
channel initialization and attachment */ 57c1549497SJosef 'Jeff' Sipek static DEFINE_MUTEX(virtio_9p_lock); 5868da9ba4SVenkateswararao Jujjuri (JV) static DECLARE_WAIT_QUEUE_HEAD(vp_wq); 5968da9ba4SVenkateswararao Jujjuri (JV) static atomic_t vp_pinned = ATOMIC_INIT(0); 60b530cc79SEric Van Hensbergen 61ee443996SEric Van Hensbergen /** 62ee443996SEric Van Hensbergen * struct virtio_chan - per-instance transport information 63ee443996SEric Van Hensbergen * @initialized: whether the channel is initialized 64ee443996SEric Van Hensbergen * @inuse: whether the channel is in use 65ee443996SEric Van Hensbergen * @lock: protects multiple elements within this structure 660e15597eSAbhishek Kulkarni * @client: client instance 67ee443996SEric Van Hensbergen * @vdev: virtio dev associated with this channel 68ee443996SEric Van Hensbergen * @vq: virtio queue associated with this channel 69ee443996SEric Van Hensbergen * @sg: scatter gather list which is used to pack a request (protected?) 70ee443996SEric Van Hensbergen * 71ee443996SEric Van Hensbergen * We keep all per-channel information in a structure. 72b530cc79SEric Van Hensbergen * This structure is allocated within the devices dev->mem space. 73b530cc79SEric Van Hensbergen * A pointer to the structure will get put in the transport private. 74ee443996SEric Van Hensbergen * 75b530cc79SEric Van Hensbergen */ 76ee443996SEric Van Hensbergen 7737c1209dSAneesh Kumar K.V struct virtio_chan { 78ee443996SEric Van Hensbergen bool inuse; 79b530cc79SEric Van Hensbergen 80e2735b77SEric Van Hensbergen spinlock_t lock; 81e2735b77SEric Van Hensbergen 82fea511a6SEric Van Hensbergen struct p9_client *client; 83b530cc79SEric Van Hensbergen struct virtio_device *vdev; 84e2735b77SEric Van Hensbergen struct virtqueue *vq; 8552f44e0dSVenkateswararao Jujjuri (JV) int ring_bufs_avail; 8652f44e0dSVenkateswararao Jujjuri (JV) wait_queue_head_t *vc_wq; 8768da9ba4SVenkateswararao Jujjuri (JV) /* This is global limit. 
Since we don't have a global structure, 8868da9ba4SVenkateswararao Jujjuri (JV) * will be placing it in each channel. 8968da9ba4SVenkateswararao Jujjuri (JV) */ 907293bfbaSZhang Yanfei unsigned long p9_max_pages; 91e2735b77SEric Van Hensbergen /* Scatterlist: can be too big for stack. */ 92e2735b77SEric Van Hensbergen struct scatterlist sg[VIRTQUEUE_NUM]; 9337c1209dSAneesh Kumar K.V 9497ee9b02SAneesh Kumar K.V int tag_len; 9597ee9b02SAneesh Kumar K.V /* 9697ee9b02SAneesh Kumar K.V * tag name to identify a mount Non-null terminated 9797ee9b02SAneesh Kumar K.V */ 9897ee9b02SAneesh Kumar K.V char *tag; 9997ee9b02SAneesh Kumar K.V 10037c1209dSAneesh Kumar K.V struct list_head chan_list; 10137c1209dSAneesh Kumar K.V }; 10237c1209dSAneesh Kumar K.V 10337c1209dSAneesh Kumar K.V static struct list_head virtio_chan_list; 104b530cc79SEric Van Hensbergen 105b530cc79SEric Van Hensbergen /* How many bytes left in this page. */ 106b530cc79SEric Van Hensbergen static unsigned int rest_of_page(void *data) 107b530cc79SEric Van Hensbergen { 108b530cc79SEric Van Hensbergen return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE); 109b530cc79SEric Van Hensbergen } 110b530cc79SEric Van Hensbergen 111ee443996SEric Van Hensbergen /** 112ee443996SEric Van Hensbergen * p9_virtio_close - reclaim resources of a channel 1130e15597eSAbhishek Kulkarni * @client: client instance 114ee443996SEric Van Hensbergen * 115ee443996SEric Van Hensbergen * This reclaims a channel by freeing its resources and 116ee443996SEric Van Hensbergen * reseting its inuse flag. 
117ee443996SEric Van Hensbergen * 118ee443996SEric Van Hensbergen */ 119ee443996SEric Van Hensbergen 1208b81ef58SEric Van Hensbergen static void p9_virtio_close(struct p9_client *client) 121b530cc79SEric Van Hensbergen { 1228b81ef58SEric Van Hensbergen struct virtio_chan *chan = client->trans; 123b530cc79SEric Van Hensbergen 124c1549497SJosef 'Jeff' Sipek mutex_lock(&virtio_9p_lock); 125fb786100SAneesh Kumar K.V if (chan) 126b530cc79SEric Van Hensbergen chan->inuse = false; 127c1549497SJosef 'Jeff' Sipek mutex_unlock(&virtio_9p_lock); 128b530cc79SEric Van Hensbergen } 129b530cc79SEric Van Hensbergen 130ee443996SEric Van Hensbergen /** 131ee443996SEric Van Hensbergen * req_done - callback which signals activity from the server 132ee443996SEric Van Hensbergen * @vq: virtio queue activity was received on 133ee443996SEric Van Hensbergen * 134ee443996SEric Van Hensbergen * This notifies us that the server has triggered some activity 135ee443996SEric Van Hensbergen * on the virtio channel - most likely a response to request we 136ee443996SEric Van Hensbergen * sent. Figure out which requests now have responses and wake up 137ee443996SEric Van Hensbergen * those threads. 138ee443996SEric Van Hensbergen * 139ee443996SEric Van Hensbergen * Bugs: could do with some additional sanity checking, but appears to work. 
140ee443996SEric Van Hensbergen * 141ee443996SEric Van Hensbergen */ 142ee443996SEric Van Hensbergen 143e2735b77SEric Van Hensbergen static void req_done(struct virtqueue *vq) 144b530cc79SEric Van Hensbergen { 145e2735b77SEric Van Hensbergen struct virtio_chan *chan = vq->vdev->priv; 146e2735b77SEric Van Hensbergen struct p9_fcall *rc; 147e2735b77SEric Van Hensbergen unsigned int len; 148e2735b77SEric Van Hensbergen struct p9_req_t *req; 149419b3956SVenkateswararao Jujjuri (JV) unsigned long flags; 150b530cc79SEric Van Hensbergen 1515d385153SJoe Perches p9_debug(P9_DEBUG_TRANS, ": request done\n"); 15291b8534fSEric Van Hensbergen 153a01a9840SVenkateswararao Jujjuri (JV) while (1) { 154419b3956SVenkateswararao Jujjuri (JV) spin_lock_irqsave(&chan->lock, flags); 155419b3956SVenkateswararao Jujjuri (JV) rc = virtqueue_get_buf(chan->vq, &len); 156a01a9840SVenkateswararao Jujjuri (JV) if (rc == NULL) { 157a01a9840SVenkateswararao Jujjuri (JV) spin_unlock_irqrestore(&chan->lock, flags); 158a01a9840SVenkateswararao Jujjuri (JV) break; 159a01a9840SVenkateswararao Jujjuri (JV) } 16052f44e0dSVenkateswararao Jujjuri (JV) chan->ring_bufs_avail = 1; 16152f44e0dSVenkateswararao Jujjuri (JV) spin_unlock_irqrestore(&chan->lock, flags); 16253bda3e5SVenkateswararao Jujjuri (JV) /* Wakeup if anyone waiting for VirtIO ring space. 
*/ 16353bda3e5SVenkateswararao Jujjuri (JV) wake_up(chan->vc_wq); 1645d385153SJoe Perches p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc); 1655d385153SJoe Perches p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag); 166fea511a6SEric Van Hensbergen req = p9_tag_lookup(chan->client, rc->tag); 1672b6e72edSDominique Martinet p9_client_cb(chan->client, req, REQ_STATUS_RCVD); 168e2735b77SEric Van Hensbergen } 169b530cc79SEric Van Hensbergen } 170b530cc79SEric Van Hensbergen 171ee443996SEric Van Hensbergen /** 172ee443996SEric Van Hensbergen * pack_sg_list - pack a scatter gather list from a linear buffer 173ee443996SEric Van Hensbergen * @sg: scatter/gather list to pack into 174ee443996SEric Van Hensbergen * @start: which segment of the sg_list to start at 175ee443996SEric Van Hensbergen * @limit: maximum segment to pack data to 176ee443996SEric Van Hensbergen * @data: data to pack into scatter/gather list 177ee443996SEric Van Hensbergen * @count: amount of data to pack into the scatter/gather list 178ee443996SEric Van Hensbergen * 179ee443996SEric Van Hensbergen * sg_lists have multiple segments of various sizes. This will pack 180ee443996SEric Van Hensbergen * arbitrary data into an existing scatter gather list, segmenting the 181ee443996SEric Van Hensbergen * data as necessary within constraints. 
182ee443996SEric Van Hensbergen * 183ee443996SEric Van Hensbergen */ 184ee443996SEric Van Hensbergen 185abfa034eSAneesh Kumar K.V static int pack_sg_list(struct scatterlist *sg, int start, 186abfa034eSAneesh Kumar K.V int limit, char *data, int count) 187e2735b77SEric Van Hensbergen { 188e2735b77SEric Van Hensbergen int s; 189e2735b77SEric Van Hensbergen int index = start; 190e2735b77SEric Van Hensbergen 191e2735b77SEric Van Hensbergen while (count) { 192e2735b77SEric Van Hensbergen s = rest_of_page(data); 193e2735b77SEric Van Hensbergen if (s > count) 194e2735b77SEric Van Hensbergen s = count; 1955fcb08beSSasha Levin BUG_ON(index > limit); 1960b36f1adSRusty Russell /* Make sure we don't terminate early. */ 1970b36f1adSRusty Russell sg_unmark_end(&sg[index]); 198e2735b77SEric Van Hensbergen sg_set_buf(&sg[index++], data, s); 199e2735b77SEric Van Hensbergen count -= s; 200e2735b77SEric Van Hensbergen data += s; 201e2735b77SEric Van Hensbergen } 2020b36f1adSRusty Russell if (index-start) 2030b36f1adSRusty Russell sg_mark_end(&sg[index - 1]); 204e2735b77SEric Van Hensbergen return index-start; 205e2735b77SEric Van Hensbergen } 206e2735b77SEric Van Hensbergen 20791b8534fSEric Van Hensbergen /* We don't currently allow canceling of virtio requests */ 20891b8534fSEric Van Hensbergen static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req) 20991b8534fSEric Van Hensbergen { 21091b8534fSEric Van Hensbergen return 1; 21191b8534fSEric Van Hensbergen } 21291b8534fSEric Van Hensbergen 213ee443996SEric Van Hensbergen /** 2144038866dSVenkateswararao Jujjuri (JV) * pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer, 2154038866dSVenkateswararao Jujjuri (JV) * this takes a list of pages. 2164038866dSVenkateswararao Jujjuri (JV) * @sg: scatter/gather list to pack into 2174038866dSVenkateswararao Jujjuri (JV) * @start: which segment of the sg_list to start at 2182c53040fSBen Hutchings * @pdata: a list of pages to add into sg. 
219abfa034eSAneesh Kumar K.V * @nr_pages: number of pages to pack into the scatter/gather list 220abfa034eSAneesh Kumar K.V * @data: data to pack into scatter/gather list 2214038866dSVenkateswararao Jujjuri (JV) * @count: amount of data to pack into the scatter/gather list 2224038866dSVenkateswararao Jujjuri (JV) */ 2234038866dSVenkateswararao Jujjuri (JV) static int 224abfa034eSAneesh Kumar K.V pack_sg_list_p(struct scatterlist *sg, int start, int limit, 225abfa034eSAneesh Kumar K.V struct page **pdata, int nr_pages, char *data, int count) 2264038866dSVenkateswararao Jujjuri (JV) { 227abfa034eSAneesh Kumar K.V int i = 0, s; 228abfa034eSAneesh Kumar K.V int data_off; 2294038866dSVenkateswararao Jujjuri (JV) int index = start; 2304038866dSVenkateswararao Jujjuri (JV) 231abfa034eSAneesh Kumar K.V BUG_ON(nr_pages > (limit - start)); 232abfa034eSAneesh Kumar K.V /* 233abfa034eSAneesh Kumar K.V * if the first page doesn't start at 234abfa034eSAneesh Kumar K.V * page boundary find the offset 235abfa034eSAneesh Kumar K.V */ 236abfa034eSAneesh Kumar K.V data_off = offset_in_page(data); 237abfa034eSAneesh Kumar K.V while (nr_pages) { 238abfa034eSAneesh Kumar K.V s = rest_of_page(data); 239abfa034eSAneesh Kumar K.V if (s > count) 240abfa034eSAneesh Kumar K.V s = count; 2410b36f1adSRusty Russell /* Make sure we don't terminate early. 
*/ 2420b36f1adSRusty Russell sg_unmark_end(&sg[index]); 243abfa034eSAneesh Kumar K.V sg_set_page(&sg[index++], pdata[i++], s, data_off); 244abfa034eSAneesh Kumar K.V data_off = 0; 245abfa034eSAneesh Kumar K.V data += s; 2464038866dSVenkateswararao Jujjuri (JV) count -= s; 247abfa034eSAneesh Kumar K.V nr_pages--; 2484038866dSVenkateswararao Jujjuri (JV) } 2490b36f1adSRusty Russell 2500b36f1adSRusty Russell if (index-start) 2510b36f1adSRusty Russell sg_mark_end(&sg[index - 1]); 2524038866dSVenkateswararao Jujjuri (JV) return index - start; 2534038866dSVenkateswararao Jujjuri (JV) } 2544038866dSVenkateswararao Jujjuri (JV) 2554038866dSVenkateswararao Jujjuri (JV) /** 25691b8534fSEric Van Hensbergen * p9_virtio_request - issue a request 2570e15597eSAbhishek Kulkarni * @client: client instance issuing the request 2580e15597eSAbhishek Kulkarni * @req: request to be issued 259ee443996SEric Van Hensbergen * 260ee443996SEric Van Hensbergen */ 261ee443996SEric Van Hensbergen 262e2735b77SEric Van Hensbergen static int 26391b8534fSEric Van Hensbergen p9_virtio_request(struct p9_client *client, struct p9_req_t *req) 264e2735b77SEric Van Hensbergen { 265abfa034eSAneesh Kumar K.V int err; 2660b36f1adSRusty Russell int in, out, out_sgs, in_sgs; 267419b3956SVenkateswararao Jujjuri (JV) unsigned long flags; 268abfa034eSAneesh Kumar K.V struct virtio_chan *chan = client->trans; 2690b36f1adSRusty Russell struct scatterlist *sgs[2]; 270e2735b77SEric Van Hensbergen 2715d385153SJoe Perches p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n"); 272e2735b77SEric Van Hensbergen 273419b3956SVenkateswararao Jujjuri (JV) req->status = REQ_STATUS_SENT; 274abfa034eSAneesh Kumar K.V req_retry: 275419b3956SVenkateswararao Jujjuri (JV) spin_lock_irqsave(&chan->lock, flags); 2764038866dSVenkateswararao Jujjuri (JV) 2770b36f1adSRusty Russell out_sgs = in_sgs = 0; 2784038866dSVenkateswararao Jujjuri (JV) /* Handle out VirtIO ring buffers */ 279abfa034eSAneesh Kumar K.V out = pack_sg_list(chan->sg, 0, 
280abfa034eSAneesh Kumar K.V VIRTQUEUE_NUM, req->tc->sdata, req->tc->size); 2810b36f1adSRusty Russell if (out) 2820b36f1adSRusty Russell sgs[out_sgs++] = chan->sg; 2834038866dSVenkateswararao Jujjuri (JV) 284abfa034eSAneesh Kumar K.V in = pack_sg_list(chan->sg, out, 285abfa034eSAneesh Kumar K.V VIRTQUEUE_NUM, req->rc->sdata, req->rc->capacity); 2860b36f1adSRusty Russell if (in) 2870b36f1adSRusty Russell sgs[out_sgs + in_sgs++] = chan->sg + out; 288e2735b77SEric Van Hensbergen 2890b36f1adSRusty Russell err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req->tc, 290f96fde41SRusty Russell GFP_ATOMIC); 291419b3956SVenkateswararao Jujjuri (JV) if (err < 0) { 29252f44e0dSVenkateswararao Jujjuri (JV) if (err == -ENOSPC) { 29352f44e0dSVenkateswararao Jujjuri (JV) chan->ring_bufs_avail = 0; 29452f44e0dSVenkateswararao Jujjuri (JV) spin_unlock_irqrestore(&chan->lock, flags); 29552f44e0dSVenkateswararao Jujjuri (JV) err = wait_event_interruptible(*chan->vc_wq, 29652f44e0dSVenkateswararao Jujjuri (JV) chan->ring_bufs_avail); 29752f44e0dSVenkateswararao Jujjuri (JV) if (err == -ERESTARTSYS) 29852f44e0dSVenkateswararao Jujjuri (JV) return err; 29952f44e0dSVenkateswararao Jujjuri (JV) 3005d385153SJoe Perches p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n"); 301abfa034eSAneesh Kumar K.V goto req_retry; 302abfa034eSAneesh Kumar K.V } else { 303abfa034eSAneesh Kumar K.V spin_unlock_irqrestore(&chan->lock, flags); 3045d385153SJoe Perches p9_debug(P9_DEBUG_TRANS, 3050b36f1adSRusty Russell "virtio rpc add_sgs returned failure\n"); 306abfa034eSAneesh Kumar K.V return -EIO; 307abfa034eSAneesh Kumar K.V } 308abfa034eSAneesh Kumar K.V } 309abfa034eSAneesh Kumar K.V virtqueue_kick(chan->vq); 310abfa034eSAneesh Kumar K.V spin_unlock_irqrestore(&chan->lock, flags); 311abfa034eSAneesh Kumar K.V 3125d385153SJoe Perches p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n"); 313abfa034eSAneesh Kumar K.V return 0; 314abfa034eSAneesh Kumar K.V } 315abfa034eSAneesh Kumar K.V 316abfa034eSAneesh 
Kumar K.V static int p9_get_mapped_pages(struct virtio_chan *chan, 317abfa034eSAneesh Kumar K.V struct page **pages, char *data, 318abfa034eSAneesh Kumar K.V int nr_pages, int write, int kern_buf) 319abfa034eSAneesh Kumar K.V { 320abfa034eSAneesh Kumar K.V int err; 321abfa034eSAneesh Kumar K.V if (!kern_buf) { 322abfa034eSAneesh Kumar K.V /* 323abfa034eSAneesh Kumar K.V * We allow only p9_max_pages pinned. We wait for the 324abfa034eSAneesh Kumar K.V * Other zc request to finish here 325abfa034eSAneesh Kumar K.V */ 326abfa034eSAneesh Kumar K.V if (atomic_read(&vp_pinned) >= chan->p9_max_pages) { 327abfa034eSAneesh Kumar K.V err = wait_event_interruptible(vp_wq, 328abfa034eSAneesh Kumar K.V (atomic_read(&vp_pinned) < chan->p9_max_pages)); 329abfa034eSAneesh Kumar K.V if (err == -ERESTARTSYS) 330abfa034eSAneesh Kumar K.V return err; 331abfa034eSAneesh Kumar K.V } 332abfa034eSAneesh Kumar K.V err = p9_payload_gup(data, &nr_pages, pages, write); 333abfa034eSAneesh Kumar K.V if (err < 0) 334abfa034eSAneesh Kumar K.V return err; 335abfa034eSAneesh Kumar K.V atomic_add(nr_pages, &vp_pinned); 336abfa034eSAneesh Kumar K.V } else { 337abfa034eSAneesh Kumar K.V /* kernel buffer, no need to pin pages */ 338abfa034eSAneesh Kumar K.V int s, index = 0; 339abfa034eSAneesh Kumar K.V int count = nr_pages; 340abfa034eSAneesh Kumar K.V while (nr_pages) { 341abfa034eSAneesh Kumar K.V s = rest_of_page(data); 342b6f52ae2SRichard Yao if (is_vmalloc_addr(data)) 343b6f52ae2SRichard Yao pages[index++] = vmalloc_to_page(data); 344b6f52ae2SRichard Yao else 345b9cdc88dSWill Deacon pages[index++] = kmap_to_page(data); 346abfa034eSAneesh Kumar K.V data += s; 347abfa034eSAneesh Kumar K.V nr_pages--; 348abfa034eSAneesh Kumar K.V } 349abfa034eSAneesh Kumar K.V nr_pages = count; 350abfa034eSAneesh Kumar K.V } 351abfa034eSAneesh Kumar K.V return nr_pages; 352abfa034eSAneesh Kumar K.V } 353abfa034eSAneesh Kumar K.V 354abfa034eSAneesh Kumar K.V /** 355abfa034eSAneesh Kumar K.V * p9_virtio_zc_request - 
issue a zero copy request 356abfa034eSAneesh Kumar K.V * @client: client instance issuing the request 357abfa034eSAneesh Kumar K.V * @req: request to be issued 358abfa034eSAneesh Kumar K.V * @uidata: user bffer that should be ued for zero copy read 359abfa034eSAneesh Kumar K.V * @uodata: user buffer that shoud be user for zero copy write 360abfa034eSAneesh Kumar K.V * @inlen: read buffer size 361abfa034eSAneesh Kumar K.V * @olen: write buffer size 362abfa034eSAneesh Kumar K.V * @hdrlen: reader header size, This is the size of response protocol data 363abfa034eSAneesh Kumar K.V * 364abfa034eSAneesh Kumar K.V */ 365abfa034eSAneesh Kumar K.V static int 366abfa034eSAneesh Kumar K.V p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req, 367abfa034eSAneesh Kumar K.V char *uidata, char *uodata, int inlen, 368abfa034eSAneesh Kumar K.V int outlen, int in_hdr_len, int kern_buf) 369abfa034eSAneesh Kumar K.V { 3700b36f1adSRusty Russell int in, out, err, out_sgs, in_sgs; 371abfa034eSAneesh Kumar K.V unsigned long flags; 372abfa034eSAneesh Kumar K.V int in_nr_pages = 0, out_nr_pages = 0; 373abfa034eSAneesh Kumar K.V struct page **in_pages = NULL, **out_pages = NULL; 374abfa034eSAneesh Kumar K.V struct virtio_chan *chan = client->trans; 3750b36f1adSRusty Russell struct scatterlist *sgs[4]; 376abfa034eSAneesh Kumar K.V 3775d385153SJoe Perches p9_debug(P9_DEBUG_TRANS, "virtio request\n"); 378abfa034eSAneesh Kumar K.V 379abfa034eSAneesh Kumar K.V if (uodata) { 380abfa034eSAneesh Kumar K.V out_nr_pages = p9_nr_pages(uodata, outlen); 381abfa034eSAneesh Kumar K.V out_pages = kmalloc(sizeof(struct page *) * out_nr_pages, 382abfa034eSAneesh Kumar K.V GFP_NOFS); 383abfa034eSAneesh Kumar K.V if (!out_pages) { 384abfa034eSAneesh Kumar K.V err = -ENOMEM; 385abfa034eSAneesh Kumar K.V goto err_out; 386abfa034eSAneesh Kumar K.V } 387abfa034eSAneesh Kumar K.V out_nr_pages = p9_get_mapped_pages(chan, out_pages, uodata, 388abfa034eSAneesh Kumar K.V out_nr_pages, 0, kern_buf); 
389abfa034eSAneesh Kumar K.V if (out_nr_pages < 0) { 390abfa034eSAneesh Kumar K.V err = out_nr_pages; 391abfa034eSAneesh Kumar K.V kfree(out_pages); 392abfa034eSAneesh Kumar K.V out_pages = NULL; 393abfa034eSAneesh Kumar K.V goto err_out; 394abfa034eSAneesh Kumar K.V } 395abfa034eSAneesh Kumar K.V } 396abfa034eSAneesh Kumar K.V if (uidata) { 397abfa034eSAneesh Kumar K.V in_nr_pages = p9_nr_pages(uidata, inlen); 398abfa034eSAneesh Kumar K.V in_pages = kmalloc(sizeof(struct page *) * in_nr_pages, 399abfa034eSAneesh Kumar K.V GFP_NOFS); 400abfa034eSAneesh Kumar K.V if (!in_pages) { 401abfa034eSAneesh Kumar K.V err = -ENOMEM; 402abfa034eSAneesh Kumar K.V goto err_out; 403abfa034eSAneesh Kumar K.V } 404abfa034eSAneesh Kumar K.V in_nr_pages = p9_get_mapped_pages(chan, in_pages, uidata, 405abfa034eSAneesh Kumar K.V in_nr_pages, 1, kern_buf); 406abfa034eSAneesh Kumar K.V if (in_nr_pages < 0) { 407abfa034eSAneesh Kumar K.V err = in_nr_pages; 408abfa034eSAneesh Kumar K.V kfree(in_pages); 409abfa034eSAneesh Kumar K.V in_pages = NULL; 410abfa034eSAneesh Kumar K.V goto err_out; 411abfa034eSAneesh Kumar K.V } 412abfa034eSAneesh Kumar K.V } 413abfa034eSAneesh Kumar K.V req->status = REQ_STATUS_SENT; 414abfa034eSAneesh Kumar K.V req_retry_pinned: 415abfa034eSAneesh Kumar K.V spin_lock_irqsave(&chan->lock, flags); 4160b36f1adSRusty Russell 4170b36f1adSRusty Russell out_sgs = in_sgs = 0; 4180b36f1adSRusty Russell 419abfa034eSAneesh Kumar K.V /* out data */ 420abfa034eSAneesh Kumar K.V out = pack_sg_list(chan->sg, 0, 421abfa034eSAneesh Kumar K.V VIRTQUEUE_NUM, req->tc->sdata, req->tc->size); 422abfa034eSAneesh Kumar K.V 4230b36f1adSRusty Russell if (out) 4240b36f1adSRusty Russell sgs[out_sgs++] = chan->sg; 4250b36f1adSRusty Russell 4260b36f1adSRusty Russell if (out_pages) { 4270b36f1adSRusty Russell sgs[out_sgs++] = chan->sg + out; 428abfa034eSAneesh Kumar K.V out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM, 429abfa034eSAneesh Kumar K.V out_pages, out_nr_pages, uodata, outlen); 
4300b36f1adSRusty Russell } 4310b36f1adSRusty Russell 432abfa034eSAneesh Kumar K.V /* 433abfa034eSAneesh Kumar K.V * Take care of in data 434abfa034eSAneesh Kumar K.V * For example TREAD have 11. 435abfa034eSAneesh Kumar K.V * 11 is the read/write header = PDU Header(7) + IO Size (4). 436abfa034eSAneesh Kumar K.V * Arrange in such a way that server places header in the 437abfa034eSAneesh Kumar K.V * alloced memory and payload onto the user buffer. 438abfa034eSAneesh Kumar K.V */ 439abfa034eSAneesh Kumar K.V in = pack_sg_list(chan->sg, out, 440abfa034eSAneesh Kumar K.V VIRTQUEUE_NUM, req->rc->sdata, in_hdr_len); 4410b36f1adSRusty Russell if (in) 4420b36f1adSRusty Russell sgs[out_sgs + in_sgs++] = chan->sg + out; 4430b36f1adSRusty Russell 4440b36f1adSRusty Russell if (in_pages) { 4450b36f1adSRusty Russell sgs[out_sgs + in_sgs++] = chan->sg + out + in; 446abfa034eSAneesh Kumar K.V in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM, 447abfa034eSAneesh Kumar K.V in_pages, in_nr_pages, uidata, inlen); 4480b36f1adSRusty Russell } 449abfa034eSAneesh Kumar K.V 4500b36f1adSRusty Russell BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs)); 4510b36f1adSRusty Russell err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req->tc, 452f96fde41SRusty Russell GFP_ATOMIC); 453abfa034eSAneesh Kumar K.V if (err < 0) { 454abfa034eSAneesh Kumar K.V if (err == -ENOSPC) { 455abfa034eSAneesh Kumar K.V chan->ring_bufs_avail = 0; 456abfa034eSAneesh Kumar K.V spin_unlock_irqrestore(&chan->lock, flags); 457abfa034eSAneesh Kumar K.V err = wait_event_interruptible(*chan->vc_wq, 458abfa034eSAneesh Kumar K.V chan->ring_bufs_avail); 459abfa034eSAneesh Kumar K.V if (err == -ERESTARTSYS) 460abfa034eSAneesh Kumar K.V goto err_out; 461abfa034eSAneesh Kumar K.V 4625d385153SJoe Perches p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n"); 463316ad550SVenkateswararao Jujjuri (JV) goto req_retry_pinned; 46452f44e0dSVenkateswararao Jujjuri (JV) } else { 465419b3956SVenkateswararao Jujjuri (JV) 
spin_unlock_irqrestore(&chan->lock, flags); 4665d385153SJoe Perches p9_debug(P9_DEBUG_TRANS, 4670b36f1adSRusty Russell "virtio rpc add_sgs returned failure\n"); 468abfa034eSAneesh Kumar K.V err = -EIO; 469abfa034eSAneesh Kumar K.V goto err_out; 470e2735b77SEric Van Hensbergen } 47152f44e0dSVenkateswararao Jujjuri (JV) } 472dc3f5e68SMichael S. Tsirkin virtqueue_kick(chan->vq); 473419b3956SVenkateswararao Jujjuri (JV) spin_unlock_irqrestore(&chan->lock, flags); 4745d385153SJoe Perches p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n"); 475abfa034eSAneesh Kumar K.V err = wait_event_interruptible(*req->wq, 476abfa034eSAneesh Kumar K.V req->status >= REQ_STATUS_RCVD); 477abfa034eSAneesh Kumar K.V /* 478abfa034eSAneesh Kumar K.V * Non kernel buffers are pinned, unpin them 479abfa034eSAneesh Kumar K.V */ 480abfa034eSAneesh Kumar K.V err_out: 481abfa034eSAneesh Kumar K.V if (!kern_buf) { 482abfa034eSAneesh Kumar K.V if (in_pages) { 483abfa034eSAneesh Kumar K.V p9_release_pages(in_pages, in_nr_pages); 484abfa034eSAneesh Kumar K.V atomic_sub(in_nr_pages, &vp_pinned); 485abfa034eSAneesh Kumar K.V } 486abfa034eSAneesh Kumar K.V if (out_pages) { 487abfa034eSAneesh Kumar K.V p9_release_pages(out_pages, out_nr_pages); 488abfa034eSAneesh Kumar K.V atomic_sub(out_nr_pages, &vp_pinned); 489abfa034eSAneesh Kumar K.V } 490abfa034eSAneesh Kumar K.V /* wakeup anybody waiting for slots to pin pages */ 491abfa034eSAneesh Kumar K.V wake_up(&vp_wq); 492abfa034eSAneesh Kumar K.V } 493abfa034eSAneesh Kumar K.V kfree(in_pages); 494abfa034eSAneesh Kumar K.V kfree(out_pages); 495abfa034eSAneesh Kumar K.V return err; 496e2735b77SEric Van Hensbergen } 497e2735b77SEric Van Hensbergen 49886c84373SAneesh Kumar K.V static ssize_t p9_mount_tag_show(struct device *dev, 49986c84373SAneesh Kumar K.V struct device_attribute *attr, char *buf) 50086c84373SAneesh Kumar K.V { 50186c84373SAneesh Kumar K.V struct virtio_chan *chan; 50286c84373SAneesh Kumar K.V struct virtio_device *vdev; 50386c84373SAneesh 
Kumar K.V 50486c84373SAneesh Kumar K.V vdev = dev_to_virtio(dev); 50586c84373SAneesh Kumar K.V chan = vdev->priv; 50686c84373SAneesh Kumar K.V 50786c84373SAneesh Kumar K.V return snprintf(buf, chan->tag_len + 1, "%s", chan->tag); 50886c84373SAneesh Kumar K.V } 50986c84373SAneesh Kumar K.V 51086c84373SAneesh Kumar K.V static DEVICE_ATTR(mount_tag, 0444, p9_mount_tag_show, NULL); 51186c84373SAneesh Kumar K.V 512ee443996SEric Van Hensbergen /** 513ee443996SEric Van Hensbergen * p9_virtio_probe - probe for existence of 9P virtio channels 514ee443996SEric Van Hensbergen * @vdev: virtio device to probe 515ee443996SEric Van Hensbergen * 51637c1209dSAneesh Kumar K.V * This probes for existing virtio channels. 517ee443996SEric Van Hensbergen * 518ee443996SEric Van Hensbergen */ 519ee443996SEric Van Hensbergen 520e2735b77SEric Van Hensbergen static int p9_virtio_probe(struct virtio_device *vdev) 521b530cc79SEric Van Hensbergen { 52297ee9b02SAneesh Kumar K.V __u16 tag_len; 52397ee9b02SAneesh Kumar K.V char *tag; 524b530cc79SEric Van Hensbergen int err; 525b530cc79SEric Van Hensbergen struct virtio_chan *chan; 526b530cc79SEric Van Hensbergen 527*7754f53eSMichael S. Tsirkin if (!vdev->config->get) { 528*7754f53eSMichael S. Tsirkin dev_err(&vdev->dev, "%s failure: config access disabled\n", 529*7754f53eSMichael S. Tsirkin __func__); 530*7754f53eSMichael S. Tsirkin return -EINVAL; 531*7754f53eSMichael S. Tsirkin } 532*7754f53eSMichael S. Tsirkin 53337c1209dSAneesh Kumar K.V chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL); 53437c1209dSAneesh Kumar K.V if (!chan) { 5355d385153SJoe Perches pr_err("Failed to allocate virtio 9P channel\n"); 536b530cc79SEric Van Hensbergen err = -ENOMEM; 537b530cc79SEric Van Hensbergen goto fail; 538b530cc79SEric Van Hensbergen } 539b530cc79SEric Van Hensbergen 540e2735b77SEric Van Hensbergen chan->vdev = vdev; 541e2735b77SEric Van Hensbergen 542e2735b77SEric Van Hensbergen /* We expect one virtqueue, for requests. */ 543d2a7dddaSMichael S. 
Tsirkin chan->vq = virtio_find_single_vq(vdev, req_done, "requests"); 544e2735b77SEric Van Hensbergen if (IS_ERR(chan->vq)) { 545e2735b77SEric Van Hensbergen err = PTR_ERR(chan->vq); 546e2735b77SEric Van Hensbergen goto out_free_vq; 547b530cc79SEric Van Hensbergen } 548e2735b77SEric Van Hensbergen chan->vq->vdev->priv = chan; 549e2735b77SEric Van Hensbergen spin_lock_init(&chan->lock); 550b530cc79SEric Van Hensbergen 551e2735b77SEric Van Hensbergen sg_init_table(chan->sg, VIRTQUEUE_NUM); 552b530cc79SEric Van Hensbergen 553b530cc79SEric Van Hensbergen chan->inuse = false; 55497ee9b02SAneesh Kumar K.V if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) { 555855e0c52SRusty Russell virtio_cread(vdev, struct virtio_9p_config, tag_len, &tag_len); 55697ee9b02SAneesh Kumar K.V } else { 55797ee9b02SAneesh Kumar K.V err = -EINVAL; 55897ee9b02SAneesh Kumar K.V goto out_free_vq; 55997ee9b02SAneesh Kumar K.V } 56097ee9b02SAneesh Kumar K.V tag = kmalloc(tag_len, GFP_KERNEL); 56197ee9b02SAneesh Kumar K.V if (!tag) { 56297ee9b02SAneesh Kumar K.V err = -ENOMEM; 56397ee9b02SAneesh Kumar K.V goto out_free_vq; 56497ee9b02SAneesh Kumar K.V } 565855e0c52SRusty Russell 566855e0c52SRusty Russell virtio_cread_bytes(vdev, offsetof(struct virtio_9p_config, tag), 56797ee9b02SAneesh Kumar K.V tag, tag_len); 56897ee9b02SAneesh Kumar K.V chan->tag = tag; 56997ee9b02SAneesh Kumar K.V chan->tag_len = tag_len; 57086c84373SAneesh Kumar K.V err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); 57186c84373SAneesh Kumar K.V if (err) { 57252f44e0dSVenkateswararao Jujjuri (JV) goto out_free_tag; 57386c84373SAneesh Kumar K.V } 57452f44e0dSVenkateswararao Jujjuri (JV) chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL); 57552f44e0dSVenkateswararao Jujjuri (JV) if (!chan->vc_wq) { 57652f44e0dSVenkateswararao Jujjuri (JV) err = -ENOMEM; 57752f44e0dSVenkateswararao Jujjuri (JV) goto out_free_tag; 57852f44e0dSVenkateswararao Jujjuri (JV) } 57952f44e0dSVenkateswararao Jujjuri (JV) 
init_waitqueue_head(chan->vc_wq); 58052f44e0dSVenkateswararao Jujjuri (JV) chan->ring_bufs_avail = 1; 58168da9ba4SVenkateswararao Jujjuri (JV) /* Ceiling limit to avoid denial of service attacks */ 58268da9ba4SVenkateswararao Jujjuri (JV) chan->p9_max_pages = nr_free_buffer_pages()/4; 58352f44e0dSVenkateswararao Jujjuri (JV) 58464b4cc39SMichael S. Tsirkin virtio_device_ready(vdev); 58564b4cc39SMichael S. Tsirkin 58637c1209dSAneesh Kumar K.V mutex_lock(&virtio_9p_lock); 58737c1209dSAneesh Kumar K.V list_add_tail(&chan->chan_list, &virtio_chan_list); 58837c1209dSAneesh Kumar K.V mutex_unlock(&virtio_9p_lock); 589e0d6cb9cSMichael Marineau 590e0d6cb9cSMichael Marineau /* Let udev rules use the new mount_tag attribute. */ 591e0d6cb9cSMichael Marineau kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); 592e0d6cb9cSMichael Marineau 593b530cc79SEric Van Hensbergen return 0; 594b530cc79SEric Van Hensbergen 59552f44e0dSVenkateswararao Jujjuri (JV) out_free_tag: 59652f44e0dSVenkateswararao Jujjuri (JV) kfree(tag); 597e2735b77SEric Van Hensbergen out_free_vq: 598d2a7dddaSMichael S. Tsirkin vdev->config->del_vqs(vdev); 59937c1209dSAneesh Kumar K.V kfree(chan); 600b530cc79SEric Van Hensbergen fail: 601b530cc79SEric Van Hensbergen return err; 602b530cc79SEric Van Hensbergen } 603b530cc79SEric Van Hensbergen 604ee443996SEric Van Hensbergen 605ee443996SEric Van Hensbergen /** 606ee443996SEric Van Hensbergen * p9_virtio_create - allocate a new virtio channel 6078b81ef58SEric Van Hensbergen * @client: client instance invoking this transport 608ee443996SEric Van Hensbergen * @devname: string identifying the channel to connect to (unused) 609ee443996SEric Van Hensbergen * @args: args passed from sys_mount() for per-transport options (unused) 610ee443996SEric Van Hensbergen * 611ee443996SEric Van Hensbergen * This sets up a transport channel for 9p communication. 
 * Right now
 * we only match the first available channel, but eventually we could look up
 * alternate channels by matching devname versus a virtio_config entry.
 * We use a simple reference count mechanism to ensure that only a single
 * mount has a channel open at a time.
 *
 */

static int
p9_virtio_create(struct p9_client *client, const char *devname, char *args)
{
	struct virtio_chan *chan;
	int ret = -ENOENT;
	int found = 0;

	mutex_lock(&virtio_9p_lock);
	list_for_each_entry(chan, &virtio_chan_list, chan_list) {
		/*
		 * Match the full tag: same length and same bytes.  strncmp
		 * is used because chan->tag is not guaranteed to be
		 * NUL-terminated.
		 */
		if (!strncmp(devname, chan->tag, chan->tag_len) &&
		    strlen(devname) == chan->tag_len) {
			if (!chan->inuse) {
				chan->inuse = true;
				found = 1;
				break;
			}
			/* Tag matched but the channel is already mounted. */
			ret = -EBUSY;
		}
	}
	mutex_unlock(&virtio_9p_lock);

	if (!found) {
		pr_err("no channels available\n");
		return ret;
	}

	client->trans = (void *)chan;
	client->status = Connected;
	chan->client = client;

	return 0;
}

/**
 * p9_virtio_remove - clean up resources associated with a virtio device
 * @vdev: virtio device to remove
 *
 */

static void p9_virtio_remove(struct virtio_device *vdev)
{
	struct virtio_chan *chan = vdev->priv;

	/* If a mount still holds the channel, shut the client down first. */
	if (chan->inuse)
		p9_virtio_close(chan->client);
	vdev->config->del_vqs(vdev);

	/* Unpublish the channel so new mounts can no longer find it. */
	mutex_lock(&virtio_9p_lock);
	list_del(&chan->chan_list);
	mutex_unlock(&virtio_9p_lock);
	sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
	/* Tell udev the mount_tag attribute is gone. */
	kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
	kfree(chan->tag);
	kfree(chan->vc_wq);
	kfree(chan);

}

/* Devices this driver binds to: any 9p virtio device. */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_9P, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

/* Feature bits this driver understands. */
static unsigned int features[] = {
	VIRTIO_9P_MOUNT_TAG,
};

/* The standard "struct lguest_driver": */
static struct virtio_driver p9_virtio_drv = {
Kumar K.V .feature_table = features, 68997ee9b02SAneesh Kumar K.V .feature_table_size = ARRAY_SIZE(features), 690b530cc79SEric Van Hensbergen .driver.name = KBUILD_MODNAME, 691b530cc79SEric Van Hensbergen .driver.owner = THIS_MODULE, 692b530cc79SEric Van Hensbergen .id_table = id_table, 693b530cc79SEric Van Hensbergen .probe = p9_virtio_probe, 694f3933545SEric Van Hensbergen .remove = p9_virtio_remove, 695b530cc79SEric Van Hensbergen }; 696b530cc79SEric Van Hensbergen 697b530cc79SEric Van Hensbergen static struct p9_trans_module p9_virtio_trans = { 698b530cc79SEric Van Hensbergen .name = "virtio", 699b530cc79SEric Van Hensbergen .create = p9_virtio_create, 7008b81ef58SEric Van Hensbergen .close = p9_virtio_close, 70191b8534fSEric Van Hensbergen .request = p9_virtio_request, 702dc893e19SArnd Bergmann .zc_request = p9_virtio_zc_request, 70391b8534fSEric Van Hensbergen .cancel = p9_virtio_cancel, 704b49d8b5dSAneesh Kumar K.V /* 705b49d8b5dSAneesh Kumar K.V * We leave one entry for input and one entry for response 706b49d8b5dSAneesh Kumar K.V * headers. We also skip one more entry to accomodate, address 707b49d8b5dSAneesh Kumar K.V * that are not at page boundary, that can result in an extra 708b49d8b5dSAneesh Kumar K.V * page in zero copy. 
709b49d8b5dSAneesh Kumar K.V */ 710b49d8b5dSAneesh Kumar K.V .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3), 711f94741fdSEric Van Hensbergen .def = 1, 71272029fe8STejun Heo .owner = THIS_MODULE, 713b530cc79SEric Van Hensbergen }; 714b530cc79SEric Van Hensbergen 715b530cc79SEric Van Hensbergen /* The standard init function */ 716b530cc79SEric Van Hensbergen static int __init p9_virtio_init(void) 717b530cc79SEric Van Hensbergen { 71837c1209dSAneesh Kumar K.V INIT_LIST_HEAD(&virtio_chan_list); 719b530cc79SEric Van Hensbergen 720b530cc79SEric Van Hensbergen v9fs_register_trans(&p9_virtio_trans); 721b530cc79SEric Van Hensbergen return register_virtio_driver(&p9_virtio_drv); 722b530cc79SEric Van Hensbergen } 723b530cc79SEric Van Hensbergen 724f3933545SEric Van Hensbergen static void __exit p9_virtio_cleanup(void) 725f3933545SEric Van Hensbergen { 726f3933545SEric Van Hensbergen unregister_virtio_driver(&p9_virtio_drv); 72772029fe8STejun Heo v9fs_unregister_trans(&p9_virtio_trans); 728f3933545SEric Van Hensbergen } 729f3933545SEric Van Hensbergen 730b530cc79SEric Van Hensbergen module_init(p9_virtio_init); 731f3933545SEric Van Hensbergen module_exit(p9_virtio_cleanup); 732b530cc79SEric Van Hensbergen 733b530cc79SEric Van Hensbergen MODULE_DEVICE_TABLE(virtio, id_table); 734b530cc79SEric Van Hensbergen MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>"); 735b530cc79SEric Van Hensbergen MODULE_DESCRIPTION("Virtio 9p Transport"); 736b530cc79SEric Van Hensbergen MODULE_LICENSE("GPL"); 737