// SPDX-License-Identifier: GPL-2.0
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/* Is it in a batchable area?
	 * That is, is (addr, len) fully contained in
	 * (zone->addr, zone->size)?
	 */
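	/*
	 * For example, with a zone at 0x1000 of size 0x100, a 4-byte
	 * write at 0x10fc is in range (0x10fc + 4 == 0x1100), while a
	 * 4-byte write at 0x10fe is not (0x1102 > 0x1100).
	 */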
	if (len < 0)
		return 0;
	/* reject lengths whose end address would wrap around gpa_t */
	if (addr + len < addr)
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}

static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/* Are we able to batch it? */

	/* ring->last is the first free entry; check that we don't run
	 * into the first used entry.  One entry is always kept unused
	 * so that a full ring can be told apart from an empty one.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
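	/*
	 * Worked example, assuming a hypothetical ring size of
	 * KVM_COALESCED_MMIO_MAX == 8: with first == 3 and last == 2,
	 * avail = (3 - 2 - 1) % 8 == 0, i.e. slot 2 is the single
	 * entry that must stay unused, so the ring is full.
	 */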
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}

static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	if (!coalesced_mmio_has_room(dev)) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy data into the first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
	ring->coalesced_mmio[ring->last].pio = dev->zone.pio;
	/*
	 * Order the entry's contents before the ring->last update that
	 * publishes it; userspace reads the ring without ring_lock.
	 */
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}
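
/*
 * For context, a minimal sketch of the userspace (consumer) side of the
 * ring protocol, modelled on how a VMM such as QEMU drains the buffer;
 * "ring" is the page mapped from the vcpu mmap area (see
 * kvm_coalesced_mmio_init() below) and replay_mmio() is a hypothetical
 * stand-in for the VMM's device emulation:
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *ent =
 *			&ring->coalesced_mmio[ring->first];
 *
 *		replay_mmio(ent->phys_addr, ent->data, ent->len);
 *		smp_wmb();
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 *
 * Only userspace advances ring->first and only the kernel (above)
 * advances ring->last; neither side takes the other's lock.
 */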

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

/* Reads are never coalesced: with no ->read op, a read of the zone
 * falls back to the normal MMIO exit path.
 */
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;

	ret = 0;
	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen while kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
	return ret;
}
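
/*
 * The page allocated in kvm_coalesced_mmio_init() is the one userspace
 * maps to drain the ring.  A minimal sketch of the discovery, assuming
 * KVM_CHECK_EXTENSION(KVM_CAP_COALESCED_MMIO) returns the ring's page
 * offset within the vcpu mmap area ("run" is the vcpu mmap base and
 * page_size the host page size, both hypothetical locals):
 *
 *	int off = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
 *
 *	if (off > 0)
 *		ring = (struct kvm_coalesced_mmio_ring *)
 *			((char *)run + off * page_size);
 */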

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

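/*
 * For context, a minimal sketch of the userspace half of this ioctl,
 * assuming vm_fd was obtained via KVM_CREATE_VM; the address and size
 * below are made-up example values:
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xfed00000,
 *		.size = 0x1000,
 *		.pio  = 0,
 *	};
 *
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 *
 * Guest writes to [zone.addr, zone.addr + zone.size) are then queued
 * in the ring instead of exiting to userspace immediately.
 */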
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	/* zone->pio selects the bus: 1 for PIO, 0 for MMIO */
	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev),
		      GFP_KERNEL_ACCOUNT);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm,
				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
				zone->addr, zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);

	return ret;
}

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	/* Drop every zone on the matching bus that fully contains the
	 * requested range.
	 */
	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (zone->pio == dev->zone.pio &&
		    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm,
				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}
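
/*
 * The userspace counterpart, reusing the example zone from the sketch
 * before kvm_vm_ioctl_register_coalesced_mmio():
 *
 *	ioctl(vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zone);
 */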