xref: /openbmc/linux/virt/kvm/coalesced_mmio.c (revision a2cce7a9)
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}
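
/*
 * to_mmio() is the standard container_of() pattern: from a pointer to the
 * embedded kvm_io_device, recover the enclosing kvm_coalesced_mmio_dev.
 * A minimal standalone sketch of the idiom (hypothetical types, and a
 * simplified macro without the kernel's type checking):
 *
 *	#include <stddef.h>
 *
 *	struct inner { int x; };
 *	struct outer {
 *		int tag;
 *		struct inner member;
 *	};
 *
 *	#define container_of(ptr, type, field) \
 *		((type *)((char *)(ptr) - offsetof(type, field)))
 *
 *	static struct outer *outer_of(struct inner *p)
 *	{
 *		return container_of(p, struct outer, member);
 *	}
 */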

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/* Is it in a batchable area?
	 * (addr,len) must be fully contained in
	 * (zone->addr, zone->size).
	 */
	if (len < 0)
		return 0;
	if (addr + len < addr)
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}
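
/*
 * The "addr + len < addr" test rejects unsigned wraparound: a huge len
 * would otherwise carry past the top of the guest physical address space
 * and appear to fit. Worked example with hypothetical 64-bit values:
 * addr = 0xffffffffffffff00 and len = 0x200 give addr + len = 0x100,
 * which is below addr, so the access is refused before the zone bounds
 * are even consulted.
 */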

static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/* Are we able to batch it? */

	/* ring->last is the first free entry; check that we don't run into
	 * the first used entry. One entry in the buffer is always left
	 * unused so that a full ring can be told apart from an empty one.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}
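
/*
 * Worked example of the occupancy arithmetic, using a hypothetical
 * power-of-two ring of 8 entries (KVM's real size is derived from
 * PAGE_SIZE) so the unsigned wraparound in the subtraction stays exact:
 *
 *	first = 0, last = 0  ->  avail = (0 - 0 - 1) % 8 = 7  (empty; one
 *				 slot is always sacrificed)
 *	first = 0, last = 7  ->  avail = (0 - 7 - 1) % 8 = 0  (full)
 *	first = 5, last = 2  ->  avail = (5 - 2 - 1) % 8 = 2  (entries
 *				 5,6,7,0,1 in flight, room for two more)
 */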

static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	if (!coalesced_mmio_has_room(dev)) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy data into the first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
	/* make the entry data visible before publishing the new index */
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}
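
/*
 * The ring page is also mapped writable by userspace, so the ring->last
 * value used above is untrusted: a hostile VMM could move it out of bounds
 * between the room check and the array write. A later upstream hardening
 * (the fix for CVE-2019-14821) snapshots the index once with READ_ONCE()
 * and bounds-checks it before indexing; roughly:
 *
 *	insert = READ_ONCE(ring->last);
 *	if (!coalesced_mmio_has_room(dev, insert) ||
 *	    insert >= KVM_COALESCED_MMIO_MAX) {
 *		spin_unlock(&dev->kvm->ring_lock);
 *		return -EOPNOTSUPP;
 *	}
 *	ring->coalesced_mmio[insert].phys_addr = addr;
 *	...
 *	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
 *
 * (a sketch of the upstream shape, which also passes the snapshotted index
 * into coalesced_mmio_has_room())
 */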

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

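/*
 * Only writes are coalesced: a read cannot be deferred, since the vcpu
 * needs the result to make progress, so no .read handler is provided and
 * reads in a registered zone fall through to the normal MMIO exit path.
 */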
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;

	ret = 0;
	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
	return ret;
}
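
/*
 * The page allocated above is shared with userspace: the vcpu mmap area
 * exposes it at the page offset returned by
 * KVM_CHECK_EXTENSION(KVM_CAP_COALESCED_MMIO). A hedged sketch of a VMM
 * picking up and draining the ring (hypothetical fds and handler, error
 * handling elided; smp_wmb() stands for the VMM's own store barrier):
 *
 *	int off = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
 *	long sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	struct kvm_coalesced_mmio_ring *ring =
 *		(void *)((char *)run + off * getpagesize());
 *
 *	// drain on every vcpu exit, before handling the exit itself
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *e =
 *			&ring->coalesced_mmio[ring->first];
 *		handle_mmio_write(e->phys_addr, e->data, e->len);
 *		smp_wmb();
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 */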

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
				      zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);

	return ret;
}
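
/*
 * Userspace arms a zone with the KVM_REGISTER_COALESCED_MMIO vm ioctl and
 * disarms it with KVM_UNREGISTER_COALESCED_MMIO. A minimal sketch, assuming
 * vm_fd is an open VM file descriptor and the window address is made up:
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xfed00000,	// hypothetical write-only device window
 *		.size = 0x1000,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0)
 *		perror("KVM_REGISTER_COALESCED_MMIO");
 */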

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	mutex_lock(&kvm->slots_lock);

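	/*
	 * coalesced_mmio_in_range() tests whether the range passed in by
	 * userspace is fully contained in dev->zone, so a device is removed
	 * when its zone covers the requested range.
	 */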
	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}