/*
 * VFIO-KVM bridge pseudo device
 *
 * Copyright (C) 2013 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio.h"

struct kvm_vfio_group {
	struct list_head node;
	struct vfio_group *vfio_group;
};

struct kvm_vfio {
	struct list_head group_list;
	struct mutex lock;
	bool noncoherent;
};

static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *vfio_group;
	struct vfio_group *(*fn)(struct file *);

	fn = symbol_get(vfio_group_get_external_user);
	if (!fn)
		return ERR_PTR(-EINVAL);

	vfio_group = fn(filep);

	symbol_put(vfio_group_get_external_user);

	return vfio_group;
}

static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
{
	void (*fn)(struct vfio_group *);

	fn = symbol_get(vfio_group_put_external_user);
	if (!fn)
		return;

	fn(vfio_group);

	symbol_put(vfio_group_put_external_user);
}

static void kvm_vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
{
	void (*fn)(struct vfio_group *, struct kvm *);

	fn = symbol_get(vfio_group_set_kvm);
	if (!fn)
		return;

	fn(group, kvm);

	symbol_put(vfio_group_set_kvm);
}

static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group)
{
	long (*fn)(struct vfio_group *, unsigned long);
	long ret;

	fn = symbol_get(vfio_external_check_extension);
	if (!fn)
		return false;

	ret = fn(vfio_group, VFIO_DMA_CC_IOMMU);

	symbol_put(vfio_external_check_extension);

	return ret > 0;
}

/*
 * Groups can use the same or different IOMMU domains.  If the same then
 * adding a new group may change the coherency of groups we've previously
 * been told about.  We don't want to care about any of that so we retest
 * each group and bail as soon as we find one that's noncoherent.  This
 * means we only ever [un]register_noncoherent_dma once for the whole device.
 */
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	bool noncoherent = false;
	struct kvm_vfio_group *kvg;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (!kvm_vfio_group_is_coherent(kvg->vfio_group)) {
			noncoherent = true;
			break;
		}
	}

	if (noncoherent != kv->noncoherent) {
		kv->noncoherent = noncoherent;

		if (kv->noncoherent)
			kvm_arch_register_noncoherent_dma(dev->kvm);
		else
			kvm_arch_unregister_noncoherent_dma(dev->kvm);
	}

	mutex_unlock(&kv->lock);
}

static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
{
	struct kvm_vfio *kv = dev->private;
	struct vfio_group *vfio_group;
	struct kvm_vfio_group *kvg;
	int32_t __user *argp = (int32_t __user *)(unsigned long)arg;
	struct fd f;
	int32_t fd;
	int ret;

	switch (attr) {
	case KVM_DEV_VFIO_GROUP_ADD:
		if (get_user(fd, argp))
			return -EFAULT;

		f = fdget(fd);
		if (!f.file)
			return -EBADF;

		vfio_group = kvm_vfio_group_get_external_user(f.file);
		fdput(f);

		if (IS_ERR(vfio_group))
			return PTR_ERR(vfio_group);

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (kvg->vfio_group == vfio_group) {
				mutex_unlock(&kv->lock);
				kvm_vfio_group_put_external_user(vfio_group);
				return -EEXIST;
			}
		}

		kvg = kzalloc(sizeof(*kvg), GFP_KERNEL);
		if (!kvg) {
			mutex_unlock(&kv->lock);
			kvm_vfio_group_put_external_user(vfio_group);
			return -ENOMEM;
		}

		list_add_tail(&kvg->node, &kv->group_list);
		kvg->vfio_group = vfio_group;

		kvm_arch_start_assignment(dev->kvm);

		mutex_unlock(&kv->lock);

		kvm_vfio_group_set_kvm(vfio_group, dev->kvm);

		kvm_vfio_update_coherency(dev);

		return 0;

	case KVM_DEV_VFIO_GROUP_DEL:
		if (get_user(fd, argp))
			return -EFAULT;

		f = fdget(fd);
		if (!f.file)
			return -EBADF;

		vfio_group = kvm_vfio_group_get_external_user(f.file);
		fdput(f);

		if (IS_ERR(vfio_group))
			return PTR_ERR(vfio_group);

		ret = -ENOENT;

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (kvg->vfio_group != vfio_group)
				continue;

			list_del(&kvg->node);
			kvm_vfio_group_put_external_user(kvg->vfio_group);
			kfree(kvg);
			ret = 0;
			break;
		}

		kvm_arch_end_assignment(dev->kvm);

		mutex_unlock(&kv->lock);

		kvm_vfio_group_set_kvm(vfio_group, NULL);

		kvm_vfio_group_put_external_user(vfio_group);

		kvm_vfio_update_coherency(dev);

		return ret;
	}

	return -ENXIO;
}

static int kvm_vfio_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		return kvm_vfio_set_group(dev, attr->attr, attr->addr);
	}

	return -ENXIO;
}

static int kvm_vfio_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		switch (attr->attr) {
		case KVM_DEV_VFIO_GROUP_ADD:
		case KVM_DEV_VFIO_GROUP_DEL:
			return 0;
		}

		break;
	}

	return -ENXIO;
}

static void kvm_vfio_destroy(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg, *tmp;

	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
		kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
		kvm_vfio_group_put_external_user(kvg->vfio_group);
		list_del(&kvg->node);
		kfree(kvg);
		kvm_arch_end_assignment(dev->kvm);
	}

	kvm_vfio_update_coherency(dev);

	kfree(kv);
	kfree(dev);	/* alloc by kvm_ioctl_create_device, free by .destroy */
}

static int kvm_vfio_create(struct kvm_device *dev, u32 type);

static struct kvm_device_ops kvm_vfio_ops = {
	.name = "kvm-vfio",
	.create = kvm_vfio_create,
	.destroy = kvm_vfio_destroy,
	.set_attr = kvm_vfio_set_attr,
	.has_attr = kvm_vfio_has_attr,
};

static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
	struct kvm_device *tmp;
	struct kvm_vfio *kv;

	/* Only one VFIO "device" per VM */
	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
		if (tmp->ops == &kvm_vfio_ops)
			return -EBUSY;

	kv = kzalloc(sizeof(*kv), GFP_KERNEL);
	if (!kv)
		return -ENOMEM;

	INIT_LIST_HEAD(&kv->group_list);
	mutex_init(&kv->lock);

	dev->private = kv;

	return 0;
}

int kvm_vfio_ops_init(void)
{
	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
}

void kvm_vfio_ops_exit(void)
{
	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
}
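
/*
 * Illustrative userspace usage (a sketch, not part of this file): the
 * KVM-VFIO pseudo device is created with KVM_CREATE_DEVICE on a VM fd and
 * groups are added or removed with KVM_SET_DEVICE_ATTR, which lands in
 * kvm_vfio_set_attr() above.  The sketch assumes an existing vm_fd from
 * KVM_CREATE_VM, the usual userspace headers (linux/kvm.h, fcntl.h,
 * sys/ioctl.h, err.h), and "NN" standing in for the VFIO/IOMMU group
 * number; err() error handling is just for brevity.
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *	int32_t group_fd = open("/dev/vfio/NN", O_RDWR);
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_VFIO_GROUP,
 *		.attr  = KVM_DEV_VFIO_GROUP_ADD,
 *		.addr  = (__u64)(unsigned long)&group_fd,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
 *		err(1, "KVM_CREATE_DEVICE");
 *	if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr))
 *		err(1, "KVM_DEV_VFIO_GROUP_ADD");
 *
 * Passing the same attr with KVM_DEV_VFIO_GROUP_DEL removes the group
 * again; attr.addr always points at the int32_t holding the group fd.
 */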