xref: /openbmc/linux/virt/kvm/coalesced_mmio.c (revision 64a2268dcfc9c3626aa7f70902690e2fc10c1630)
15f94c174SLaurent Vivier /*
25f94c174SLaurent Vivier  * KVM coalesced MMIO
35f94c174SLaurent Vivier  *
45f94c174SLaurent Vivier  * Copyright (c) 2008 Bull S.A.S.
55f94c174SLaurent Vivier  *
65f94c174SLaurent Vivier  *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
75f94c174SLaurent Vivier  *
85f94c174SLaurent Vivier  */
95f94c174SLaurent Vivier 
105f94c174SLaurent Vivier #include "iodev.h"
115f94c174SLaurent Vivier 
125f94c174SLaurent Vivier #include <linux/kvm_host.h>
135f94c174SLaurent Vivier #include <linux/kvm.h>
145f94c174SLaurent Vivier 
155f94c174SLaurent Vivier #include "coalesced_mmio.h"
165f94c174SLaurent Vivier 
/*
 * Map a generic kvm_io_device back to its enclosing
 * kvm_coalesced_mmio_dev; the io device is embedded as ->dev.
 */
static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}
21d76685c4SGregory Haskins 
/*
 * Decide whether a guest MMIO access may be coalesced, i.e. recorded in
 * the per-VM ring buffer instead of exiting to userspace immediately.
 *
 * Returns 1 when the access is a write, (addr, len) lies entirely inside
 * a registered zone, and the ring has room; 0 otherwise.  Only writes can
 * be coalesced: a read needs its result before the guest can continue.
 */
static int coalesced_mmio_in_range(struct kvm_io_device *this,
				   gpa_t addr, int len, int is_write)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_zone *zone;
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;
	int i;

	if (!is_write)
		return 0;

	/* Are we able to batch it ? */

	/* last is the first free entry
	 * check if we don't meet the first used entry
	 * there is always one unused entry in the buffer
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	/* unsigned arithmetic: first - last - 1 wraps to a valid count mod ring size */
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	/*
	 * NOTE(review): this check runs without dev->lock, so multiple vcpus
	 * can pass it before any of them inserts under the lock in
	 * coalesced_mmio_write().  Requiring KVM_MAX_VCPUS free slots appears
	 * to reserve one entry per possible vcpu so the ring cannot overflow
	 * in that window — confirm against the locking design.
	 */
	if (avail < KVM_MAX_VCPUS) {
		/* full */
		return 0;
	}

	/* is it in a batchable area ? */

	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];

		/* (addr,len) is fully included in
		 * (zone->addr, zone->size)
		 */

		if (zone->addr <= addr &&
		    addr + len <= zone->addr + zone->size)
			return 1;
	}
	return 0;
}
625f94c174SLaurent Vivier 
635f94c174SLaurent Vivier static void coalesced_mmio_write(struct kvm_io_device *this,
645f94c174SLaurent Vivier 				 gpa_t addr, int len, const void *val)
655f94c174SLaurent Vivier {
66d76685c4SGregory Haskins 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
675f94c174SLaurent Vivier 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
685f94c174SLaurent Vivier 
69*64a2268dSMarcelo Tosatti 	spin_lock(&dev->lock);
705f94c174SLaurent Vivier 
715f94c174SLaurent Vivier 	/* copy data in first free entry of the ring */
725f94c174SLaurent Vivier 
735f94c174SLaurent Vivier 	ring->coalesced_mmio[ring->last].phys_addr = addr;
745f94c174SLaurent Vivier 	ring->coalesced_mmio[ring->last].len = len;
755f94c174SLaurent Vivier 	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
765f94c174SLaurent Vivier 	smp_wmb();
775f94c174SLaurent Vivier 	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
78*64a2268dSMarcelo Tosatti 	spin_unlock(&dev->lock);
795f94c174SLaurent Vivier }
805f94c174SLaurent Vivier 
/* Free the device when the io bus releases it. */
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	kfree(to_mmio(this));
}
875f94c174SLaurent Vivier 
/* io-device callbacks wired into the MMIO bus for coalescing. */
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.in_range   = coalesced_mmio_in_range,
	.destructor = coalesced_mmio_destructor,
};
93d76685c4SGregory Haskins 
945f94c174SLaurent Vivier int kvm_coalesced_mmio_init(struct kvm *kvm)
955f94c174SLaurent Vivier {
965f94c174SLaurent Vivier 	struct kvm_coalesced_mmio_dev *dev;
975f94c174SLaurent Vivier 
985f94c174SLaurent Vivier 	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
995f94c174SLaurent Vivier 	if (!dev)
1005f94c174SLaurent Vivier 		return -ENOMEM;
101*64a2268dSMarcelo Tosatti 	spin_lock_init(&dev->lock);
102d76685c4SGregory Haskins 	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
1035f94c174SLaurent Vivier 	dev->kvm = kvm;
1045f94c174SLaurent Vivier 	kvm->coalesced_mmio_dev = dev;
1055f94c174SLaurent Vivier 	kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->dev);
1065f94c174SLaurent Vivier 
1075f94c174SLaurent Vivier 	return 0;
1085f94c174SLaurent Vivier }
1095f94c174SLaurent Vivier 
1105f94c174SLaurent Vivier int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
1115f94c174SLaurent Vivier 				         struct kvm_coalesced_mmio_zone *zone)
1125f94c174SLaurent Vivier {
1135f94c174SLaurent Vivier 	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
1145f94c174SLaurent Vivier 
1155f94c174SLaurent Vivier 	if (dev == NULL)
1165f94c174SLaurent Vivier 		return -EINVAL;
1175f94c174SLaurent Vivier 
1185f94c174SLaurent Vivier 	mutex_lock(&kvm->lock);
1195f94c174SLaurent Vivier 	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
1205f94c174SLaurent Vivier 		mutex_unlock(&kvm->lock);
1215f94c174SLaurent Vivier 		return -ENOBUFS;
1225f94c174SLaurent Vivier 	}
1235f94c174SLaurent Vivier 
1245f94c174SLaurent Vivier 	dev->zone[dev->nb_zones] = *zone;
1255f94c174SLaurent Vivier 	dev->nb_zones++;
1265f94c174SLaurent Vivier 
1275f94c174SLaurent Vivier 	mutex_unlock(&kvm->lock);
1285f94c174SLaurent Vivier 	return 0;
1295f94c174SLaurent Vivier }
1305f94c174SLaurent Vivier 
1315f94c174SLaurent Vivier int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
1325f94c174SLaurent Vivier 					   struct kvm_coalesced_mmio_zone *zone)
1335f94c174SLaurent Vivier {
1345f94c174SLaurent Vivier 	int i;
1355f94c174SLaurent Vivier 	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
1365f94c174SLaurent Vivier 	struct kvm_coalesced_mmio_zone *z;
1375f94c174SLaurent Vivier 
1385f94c174SLaurent Vivier 	if (dev == NULL)
1395f94c174SLaurent Vivier 		return -EINVAL;
1405f94c174SLaurent Vivier 
1415f94c174SLaurent Vivier 	mutex_lock(&kvm->lock);
1425f94c174SLaurent Vivier 
1435f94c174SLaurent Vivier 	i = dev->nb_zones;
1445f94c174SLaurent Vivier 	while(i) {
1455f94c174SLaurent Vivier 		z = &dev->zone[i - 1];
1465f94c174SLaurent Vivier 
1475f94c174SLaurent Vivier 		/* unregister all zones
1485f94c174SLaurent Vivier 		 * included in (zone->addr, zone->size)
1495f94c174SLaurent Vivier 		 */
1505f94c174SLaurent Vivier 
1515f94c174SLaurent Vivier 		if (zone->addr <= z->addr &&
1525f94c174SLaurent Vivier 		    z->addr + z->size <= zone->addr + zone->size) {
1535f94c174SLaurent Vivier 			dev->nb_zones--;
1545f94c174SLaurent Vivier 			*z = dev->zone[dev->nb_zones];
1555f94c174SLaurent Vivier 		}
1565f94c174SLaurent Vivier 		i--;
1575f94c174SLaurent Vivier 	}
1585f94c174SLaurent Vivier 
1595f94c174SLaurent Vivier 	mutex_unlock(&kvm->lock);
1605f94c174SLaurent Vivier 
1615f94c174SLaurent Vivier 	return 0;
1625f94c174SLaurent Vivier }
163