/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static int coalesced_mmio_in_range(struct kvm_io_device *this,
				   gpa_t addr, int len, int is_write)
{
	struct kvm_coalesced_mmio_dev *dev =
		(struct kvm_coalesced_mmio_dev *)this->private;
	struct kvm_coalesced_mmio_zone *zone;
	int next;
	int i;

	if (!is_write)
		return 0;

	/* kvm->lock is taken by the caller and must not be released
	 * before dev.read/write.
	 */

	/* Are we able to batch it? */

	/* last is the first free entry; check that we do not run into
	 * the first used entry.  There is always one unused entry in
	 * the buffer, so (last + 1) == first means the ring is full.
	 */

	next = (dev->kvm->coalesced_mmio_ring->last + 1) %
						KVM_COALESCED_MMIO_MAX;
	if (next == dev->kvm->coalesced_mmio_ring->first) {
		/* full */
		return 0;
	}

	/* Is it in a batchable area? */

	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];

		/* (addr, len) must be fully included in
		 * (zone->addr, zone->size)
		 */

		if (zone->addr <= addr &&
		    addr + len <= zone->addr + zone->size)
			return 1;
	}
	return 0;
}

static void coalesced_mmio_write(struct kvm_io_device *this,
				 gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev =
		(struct kvm_coalesced_mmio_dev *)this->private;
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	/* kvm->lock must be taken by the caller before the call to in_range() */

	/* Copy the data into the first free entry of the ring. */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);

	/* Publish the entry before advancing 'last' so that userspace
	 * never observes a partially written slot.
	 */
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev =
		(struct kvm_coalesced_mmio_dev *)this->private;

	kfree(dev);
}

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->dev.write = coalesced_mmio_write;
	dev->dev.in_range = coalesced_mmio_in_range;
	dev->dev.destructor = coalesced_mmio_destructor;
	dev->dev.private = dev;
	dev->kvm = kvm;
	kvm->coalesced_mmio_dev = dev;
	kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->dev);

	return 0;
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

	if (dev == NULL)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
		mutex_unlock(&kvm->lock);
		return -ENOBUFS;
	}

	dev->zone[dev->nb_zones] = *zone;
	dev->nb_zones++;

	mutex_unlock(&kvm->lock);
	return 0;
}

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	int i;
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
	struct kvm_coalesced_mmio_zone *z;

	if (dev == NULL)
		return -EINVAL;

	mutex_lock(&kvm->lock);

	i = dev->nb_zones;
	while (i) {
		z = &dev->zone[i - 1];

		/* Unregister all zones fully included in
		 * (zone->addr, zone->size).
		 */

		if (zone->addr <= z->addr &&
		    z->addr + z->size <= zone->addr + zone->size) {
			dev->nb_zones--;
			*z = dev->zone[dev->nb_zones];
		}
		i--;
	}

	mutex_unlock(&kvm->lock);

	return 0;
}
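
/*
 * Note on the consumer side (a hedged sketch, not part of this file):
 * the ring produced above is shared with userspace, which is expected
 * to drain it after KVM_RUN returns.  The kernel owns 'last', userspace
 * owns 'first', and one slot is always left unused so that
 * first == last means "empty" and (last + 1) % MAX == first means
 * "full".  A typical drain loop on the userspace side looks roughly
 * like this (handle_mmio_write() is a hypothetical placeholder for
 * however the VMM replays the write into its device model):
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *ent =
 *			&ring->coalesced_mmio[ring->first];
 *
 *		handle_mmio_write(ent->phys_addr, ent->data, ent->len);
 *		smp_wmb();
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 */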