xref: /openbmc/linux/drivers/virtio/virtio_ring.c (revision b240b419db5d624ce7a5a397d6f62a1a686009ec)
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on the data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with 0.1 seconds between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;
	else
		vq->desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
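
/*
 * Example (illustrative sketch, not part of this driver): queueing a
 * request with a device-readable header and a device-writable response.
 * "req", "resp" and "vq" are hypothetical driver-owned objects.
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *	int err;
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sgs[0] = &hdr;
 *	sg_init_one(&status, resp, sizeof(*resp));
 *	sgs[1] = &status;
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (err)
 *		return err;	(ring full or mapping failed)
 *	virtqueue_kick(vq);
 */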

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			void *ctx,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
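
/*
 * Example (hedged sketch): the split form lets a driver drop its own
 * lock before the potentially expensive notify.  "priv" and its lock
 * are hypothetical driver state:
 *
 *	bool kick;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&priv->lock, flags);
 *	...virtqueue_add_*() calls...
 *	kick = virtqueue_kick_prepare(priv->vq);
 *	spin_unlock_irqrestore(&priv->lock, flags);
 *	if (kick)
 *		virtqueue_notify(priv->vq);
 */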

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head,
		       void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->desc_state[head].indir_desc;
	}
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf_ctx - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 * @ctx: extra context for the token, if any
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
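
/*
 * Example (sketch): draining completions, typically from the virtqueue
 * callback.  The returned token is whatever "data" was handed to
 * virtqueue_add_*(); the request type and helper below are hypothetical.
 *
 *	unsigned int len;
 *	struct my_req *req;
 *
 *	while ((req = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_my_req(req, len);
 */
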
/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
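
/*
 * Example (sketch): the canonical race-free processing loop built on
 * these primitives ("process_buf" is a hypothetical handler):
 *
 *	for (;;) {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)))
 *			process_buf(buf, len);
 *		if (virtqueue_enable_cb(vq))
 *			break;	(nothing raced in; callbacks are back on)
 *	}
 */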

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
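
/*
 * Example (sketch): a transmit-completion path that prefers one late
 * interrupt over many early ones; "reclaim_tx" is hypothetical:
 *
 *	if (!virtqueue_enable_cb_delayed(tx_vq))
 *		reclaim_tx();	(buffers already pending; don't wait)
 */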

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i, NULL);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
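
/*
 * Example (sketch): typical removal path, reclaiming tokens the device
 * never consumed ("free_my_req" is a hypothetical helper).  The device
 * must already have been reset/stopped:
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_my_req(buf);
 */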

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vring = vring;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = 0;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num - 1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			      dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
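
/*
 * Example (sketch): how a transport driver might create a ring and then
 * program its addresses into the device.  The alignment, queue size and
 * callbacks here are illustrative, not prescriptive:
 *
 *	vq = vring_create_virtqueue(index, 256, SMP_CACHE_BYTES, vdev,
 *				    true, true, false, my_notify,
 *				    my_callback, "requests");
 *	if (!vq)
 *		return ERR_PTR(-ENOMEM);
 *	(then hand virtqueue_get_desc_addr(vq), virtqueue_get_avail_addr(vq)
 *	 and virtqueue_get_used_addr(vq) to the device)
 */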

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;

	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.avail - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.used - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");