/*
 * VFIO-KVM bridge pseudo device
 *
 * Copyright (C) 2013 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio.h"

struct kvm_vfio_group {
	struct list_head node;
	struct vfio_group *vfio_group;
};

struct kvm_vfio {
	struct list_head group_list;
	struct mutex lock;
	bool noncoherent;
};

static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *vfio_group;
	struct vfio_group *(*fn)(struct file *);

	fn = symbol_get(vfio_group_get_external_user);
	if (!fn)
		return ERR_PTR(-EINVAL);

	vfio_group = fn(filep);

	symbol_put(vfio_group_get_external_user);

	return vfio_group;
}

static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
{
	void (*fn)(struct vfio_group *);

	fn = symbol_get(vfio_group_put_external_user);
	if (!fn)
		return;

	fn(vfio_group);

	symbol_put(vfio_group_put_external_user);
}

static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group)
{
	long (*fn)(struct vfio_group *, unsigned long);
	long ret;

	fn = symbol_get(vfio_external_check_extension);
	if (!fn)
		return false;

	ret = fn(vfio_group, VFIO_DMA_CC_IOMMU);

	symbol_put(vfio_external_check_extension);

	return ret > 0;
}

/*
 * Groups can use the same or different IOMMU domains.  If the same then
 * adding a new group may change the coherency of groups we've previously
 * been told about.  We don't want to care about any of that so we retest
 * each group and bail as soon as we find one that's noncoherent.  This
 * means we only ever [un]register_noncoherent_dma once for the whole device.
 */
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	bool noncoherent = false;
	struct kvm_vfio_group *kvg;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (!kvm_vfio_group_is_coherent(kvg->vfio_group)) {
			noncoherent = true;
			break;
		}
	}

	if (noncoherent != kv->noncoherent) {
		kv->noncoherent = noncoherent;

		if (kv->noncoherent)
			kvm_arch_register_noncoherent_dma(dev->kvm);
		else
			kvm_arch_unregister_noncoherent_dma(dev->kvm);
	}

	mutex_unlock(&kv->lock);
}

static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
{
	struct kvm_vfio *kv = dev->private;
	struct vfio_group *vfio_group;
	struct kvm_vfio_group *kvg;
	int32_t __user *argp = (int32_t __user *)(unsigned long)arg;
	struct fd f;
	int32_t fd;
	int ret;

	switch (attr) {
	case KVM_DEV_VFIO_GROUP_ADD:
		if (get_user(fd, argp))
			return -EFAULT;

		f = fdget(fd);
		if (!f.file)
			return -EBADF;

		vfio_group = kvm_vfio_group_get_external_user(f.file);
		fdput(f);

		if (IS_ERR(vfio_group))
			return PTR_ERR(vfio_group);

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (kvg->vfio_group == vfio_group) {
				mutex_unlock(&kv->lock);
				kvm_vfio_group_put_external_user(vfio_group);
				return -EEXIST;
			}
		}

		kvg = kzalloc(sizeof(*kvg), GFP_KERNEL);
		if (!kvg) {
			mutex_unlock(&kv->lock);
			kvm_vfio_group_put_external_user(vfio_group);
			return -ENOMEM;
		}

		list_add_tail(&kvg->node, &kv->group_list);
		kvg->vfio_group = vfio_group;

		mutex_unlock(&kv->lock);

		kvm_vfio_update_coherency(dev);

		return 0;

	case KVM_DEV_VFIO_GROUP_DEL:
		if (get_user(fd, argp))
			return -EFAULT;

		f = fdget(fd);
		if (!f.file)
			return -EBADF;

		vfio_group = kvm_vfio_group_get_external_user(f.file);
		fdput(f);

		if (IS_ERR(vfio_group))
			return PTR_ERR(vfio_group);

		ret = -ENOENT;

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (kvg->vfio_group != vfio_group)
				continue;

			list_del(&kvg->node);
			kvm_vfio_group_put_external_user(kvg->vfio_group);
			kfree(kvg);
			ret = 0;
			break;
		}

		mutex_unlock(&kv->lock);

		kvm_vfio_group_put_external_user(vfio_group);

		kvm_vfio_update_coherency(dev);

		return ret;
	}

	return -ENXIO;
}

static int kvm_vfio_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		return kvm_vfio_set_group(dev, attr->attr, attr->addr);
	}

	return -ENXIO;
}

static int kvm_vfio_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		switch (attr->attr) {
		case KVM_DEV_VFIO_GROUP_ADD:
		case KVM_DEV_VFIO_GROUP_DEL:
			return 0;
		}

		break;
	}

	return -ENXIO;
}

static void kvm_vfio_destroy(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg, *tmp;

	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
		kvm_vfio_group_put_external_user(kvg->vfio_group);
		list_del(&kvg->node);
		kfree(kvg);
	}

	kvm_vfio_update_coherency(dev);

	kfree(kv);
	kfree(dev);	/* alloc by kvm_ioctl_create_device, free by .destroy */
}

static int kvm_vfio_create(struct kvm_device *dev, u32 type);

static struct kvm_device_ops kvm_vfio_ops = {
	.name = "kvm-vfio",
	.create = kvm_vfio_create,
	.destroy = kvm_vfio_destroy,
	.set_attr = kvm_vfio_set_attr,
	.has_attr = kvm_vfio_has_attr,
};

static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
	struct kvm_device *tmp;
	struct kvm_vfio *kv;

	/* Only one VFIO "device" per VM */
	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
		if (tmp->ops == &kvm_vfio_ops)
			return -EBUSY;

	kv = kzalloc(sizeof(*kv), GFP_KERNEL);
	if (!kv)
		return -ENOMEM;

	INIT_LIST_HEAD(&kv->group_list);
	mutex_init(&kv->lock);

	dev->private = kv;

	return 0;
}

int kvm_vfio_ops_init(void)
{
	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
}

void kvm_vfio_ops_exit(void)
{
	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
}
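
/*
 * Illustrative sketch (not part of the upstream file): one way userspace
 * might drive this pseudo device, assuming a VM fd from KVM_CREATE_VM and a
 * VFIO group fd from an already-opened /dev/vfio/<group> (the group number
 * below is hypothetical).  It exercises kvm_vfio_create() via
 * KVM_CREATE_DEVICE and kvm_vfio_set_group() via KVM_SET_DEVICE_ATTR:
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *	struct kvm_device_attr attr = { 0 };
 *	int32_t group_fd = open("/dev/vfio/26", O_RDWR);	// hypothetical group
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	// on return, cd.fd is the device fd
 *
 *	attr.group = KVM_DEV_VFIO_GROUP;
 *	attr.attr  = KVM_DEV_VFIO_GROUP_ADD;
 *	attr.addr  = (uint64_t)(unsigned long)&group_fd;	// pointer to an int32_t fd
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * KVM_DEV_VFIO_GROUP_DEL takes the same pointer-to-fd payload to drop a
 * previously added group; both paths are handled by kvm_vfio_set_group()
 * above.
 */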