/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */
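
/*
 * Coalesced MMIO batches guest writes to registered guest-physical zones
 * into a ring buffer shared with userspace, instead of exiting to the
 * VMM for every access.  Userspace drains the ring on its next exit.
 */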

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static int coalesced_mmio_in_range(struct kvm_io_device *this,
				   gpa_t addr, int len, int is_write)
{
	struct kvm_coalesced_mmio_dev *dev =
				(struct kvm_coalesced_mmio_dev *)this->private;
	struct kvm_coalesced_mmio_zone *zone;
	int next;
	int i;

	/* Only writes can be coalesced; reads need an immediate result. */
	if (!is_write)
		return 0;

	/* kvm->lock is taken by the caller and must not be released
	 * before dev.read/write
	 */

	/* Can we batch the access?
	 *
	 * last indexes the first free entry; check that advancing it
	 * would not collide with first, the oldest used entry.  One
	 * entry is always left unused so that a full ring can be
	 * distinguished from an empty one.
	 */
	next = (dev->kvm->coalesced_mmio_ring->last + 1) %
							KVM_COALESCED_MMIO_MAX;
	if (next == dev->kvm->coalesced_mmio_ring->first) {
		/* full */
		return 0;
	}

	/* Is the access within a registered (batchable) zone? */
	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];

		/* (addr, len) must be fully contained in
		 * (zone->addr, zone->size)
		 */
		if (zone->addr <= addr &&
		    addr + len <= zone->addr + zone->size)
			return 1;
	}
	return 0;
}
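
/*
 * For reference, the consumer side lives in userspace: the VMM mmaps the
 * ring page (KVM_COALESCED_MMIO_PAGE_OFFSET pages into the vcpu mmap
 * area) and drains entries between first and last on each exit.  A
 * minimal sketch -- handle_mmio_write() is a hypothetical VMM helper,
 * and a real consumer would pair a read barrier with the smp_wmb() in
 * coalesced_mmio_write():
 *
 *	struct kvm_coalesced_mmio_ring *ring;	(the mmapped ring page)
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *ent =
 *				&ring->coalesced_mmio[ring->first];
 *		handle_mmio_write(ent->phys_addr, ent->data, ent->len);
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 */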

static void coalesced_mmio_write(struct kvm_io_device *this,
				 gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev =
				(struct kvm_coalesced_mmio_dev *)this->private;
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	/* kvm->lock must be taken by the caller before the call to in_range() */

	/* copy data into the first free entry of the ring */
	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
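	/* Make the entry visible before publishing it: order the stores
	 * above against the update of last below, so a reader that sees
	 * the new last also sees the entry contents.
	 */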
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	kfree(this);
}

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->dev.write = coalesced_mmio_write;
	dev->dev.in_range = coalesced_mmio_in_range;
	dev->dev.destructor = coalesced_mmio_destructor;
	dev->dev.private = dev;
	dev->kvm = kvm;
	kvm->coalesced_mmio_dev = dev;
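	/* Attach to the VM's MMIO bus so guest writes are routed through
	 * coalesced_mmio_in_range()/coalesced_mmio_write() above.
	 */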
	kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->dev);

	return 0;
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

	if (dev == NULL)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
		mutex_unlock(&kvm->lock);
		return -ENOBUFS;
	}

	dev->zone[dev->nb_zones] = *zone;
	dev->nb_zones++;

	mutex_unlock(&kvm->lock);
	return 0;
}
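
/*
 * Userspace registers a zone with the KVM_REGISTER_COALESCED_MMIO vm
 * ioctl.  A minimal sketch; vm_fd stands for an already-created KVM VM
 * file descriptor and the address/size are made-up examples:
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xfe000000,
 *		.size = 0x1000,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0)
 *		perror("KVM_REGISTER_COALESCED_MMIO");
 */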

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	int i;
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
	struct kvm_coalesced_mmio_zone *z;

	if (dev == NULL)
		return -EINVAL;

	mutex_lock(&kvm->lock);

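	/* Walk the array from the end: removal copies the last zone into
	 * the freed slot, and that zone has already been examined by the
	 * time it is moved, so no entry is skipped.
	 */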
	i = dev->nb_zones;
	while (i) {
		z = &dev->zone[i - 1];

		/* unregister every zone fully included in
		 * (zone->addr, zone->size)
		 */
		if (zone->addr <= z->addr &&
		    z->addr + z->size <= zone->addr + zone->size) {
			dev->nb_zones--;
			*z = dev->zone[dev->nb_zones];
		}
		i--;
	}

	mutex_unlock(&kvm->lock);

	return 0;
}
157