/*
 * VFIO-KVM bridge pseudo device
 *
 * Copyright (C) 2013 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio.h"

#ifdef CONFIG_SPAPR_TCE_IOMMU
#include <asm/kvm_ppc.h>
#endif

struct kvm_vfio_group {
	struct list_head node;
	struct vfio_group *vfio_group;
};

struct kvm_vfio {
	struct list_head group_list;
	struct mutex lock;
	bool noncoherent;
};
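
/*
 * The wrappers below call into VFIO through symbol_get()/symbol_put()
 * rather than direct calls, so that the kvm module does not take a hard
 * link-time dependency on the vfio module.  If vfio is not loaded, the
 * symbol lookup fails and each wrapper falls back to a safe error value.
 */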
static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *vfio_group;
	struct vfio_group *(*fn)(struct file *);

	fn = symbol_get(vfio_group_get_external_user);
	if (!fn)
		return ERR_PTR(-EINVAL);

	vfio_group = fn(filep);

	symbol_put(vfio_group_get_external_user);

	return vfio_group;
}

static bool kvm_vfio_external_group_match_file(struct vfio_group *group,
					       struct file *filep)
{
	bool ret, (*fn)(struct vfio_group *, struct file *);

	fn = symbol_get(vfio_external_group_match_file);
	if (!fn)
		return false;

	ret = fn(group, filep);

	symbol_put(vfio_external_group_match_file);

	return ret;
}

static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
{
	void (*fn)(struct vfio_group *);

	fn = symbol_get(vfio_group_put_external_user);
	if (!fn)
		return;

	fn(vfio_group);

	symbol_put(vfio_group_put_external_user);
}

static void kvm_vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
{
	void (*fn)(struct vfio_group *, struct kvm *);

	fn = symbol_get(vfio_group_set_kvm);
	if (!fn)
		return;

	fn(group, kvm);

	symbol_put(vfio_group_set_kvm);
}

static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group)
{
	long (*fn)(struct vfio_group *, unsigned long);
	long ret;

	fn = symbol_get(vfio_external_check_extension);
	if (!fn)
		return false;

	ret = fn(vfio_group, VFIO_DMA_CC_IOMMU);

	symbol_put(vfio_external_check_extension);

	return ret > 0;
}

#ifdef CONFIG_SPAPR_TCE_IOMMU
static int kvm_vfio_external_user_iommu_id(struct vfio_group *vfio_group)
{
	int (*fn)(struct vfio_group *);
	int ret = -EINVAL;

	fn = symbol_get(vfio_external_user_iommu_id);
	if (!fn)
		return ret;

	ret = fn(vfio_group);

	symbol_put(vfio_external_user_iommu_id);

	return ret;
}

static struct iommu_group *kvm_vfio_group_get_iommu_group(
		struct vfio_group *group)
{
	int group_id = kvm_vfio_external_user_iommu_id(group);

	if (group_id < 0)
		return NULL;

	return iommu_group_get_by_id(group_id);
}

static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
					     struct vfio_group *vfio_group)
{
	struct iommu_group *grp = kvm_vfio_group_get_iommu_group(vfio_group);

	if (WARN_ON_ONCE(!grp))
		return;

	kvm_spapr_tce_release_iommu_group(kvm, grp);
	iommu_group_put(grp);
}
#endif

/*
 * Groups can use the same or different IOMMU domains. If the same then
 * adding a new group may change the coherency of groups we've previously
 * been told about. We don't want to care about any of that so we retest
 * each group and bail as soon as we find one that's noncoherent. This
 * means we only ever [un]register_noncoherent_dma once for the whole device.
 */
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	bool noncoherent = false;
	struct kvm_vfio_group *kvg;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (!kvm_vfio_group_is_coherent(kvg->vfio_group)) {
			noncoherent = true;
			break;
		}
	}

	if (noncoherent != kv->noncoherent) {
		kv->noncoherent = noncoherent;

		if (kv->noncoherent)
			kvm_arch_register_noncoherent_dma(dev->kvm);
		else
			kvm_arch_unregister_noncoherent_dma(dev->kvm);
	}

	mutex_unlock(&kv->lock);
}

static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
{
	struct kvm_vfio *kv = dev->private;
	struct vfio_group *vfio_group;
	struct kvm_vfio_group *kvg;
	int32_t __user *argp = (int32_t __user *)(unsigned long)arg;
	struct fd f;
	int32_t fd;
	int ret;

	switch (attr) {
	case KVM_DEV_VFIO_GROUP_ADD:
		if (get_user(fd, argp))
			return -EFAULT;

		f = fdget(fd);
		if (!f.file)
			return -EBADF;

		vfio_group = kvm_vfio_group_get_external_user(f.file);
		fdput(f);

		if (IS_ERR(vfio_group))
			return PTR_ERR(vfio_group);

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (kvg->vfio_group == vfio_group) {
				mutex_unlock(&kv->lock);
				kvm_vfio_group_put_external_user(vfio_group);
				return -EEXIST;
			}
		}

		kvg = kzalloc(sizeof(*kvg), GFP_KERNEL);
		if (!kvg) {
			mutex_unlock(&kv->lock);
			kvm_vfio_group_put_external_user(vfio_group);
			return -ENOMEM;
		}

		list_add_tail(&kvg->node, &kv->group_list);
		kvg->vfio_group = vfio_group;

		kvm_arch_start_assignment(dev->kvm);

		mutex_unlock(&kv->lock);

		kvm_vfio_group_set_kvm(vfio_group, dev->kvm);

		kvm_vfio_update_coherency(dev);

		return 0;

	case KVM_DEV_VFIO_GROUP_DEL:
		if (get_user(fd, argp))
			return -EFAULT;

		f = fdget(fd);
		if (!f.file)
			return -EBADF;

		ret = -ENOENT;

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
								f.file))
				continue;

			list_del(&kvg->node);
			kvm_arch_end_assignment(dev->kvm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
			kvm_spapr_tce_release_vfio_group(dev->kvm,
							 kvg->vfio_group);
#endif
			kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
			kvm_vfio_group_put_external_user(kvg->vfio_group);
			kfree(kvg);
			ret = 0;
			break;
		}

		mutex_unlock(&kv->lock);

		fdput(f);

		kvm_vfio_update_coherency(dev);

		return ret;

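	/*
	 * KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE (POWER only): roughly, userspace
	 * passes a struct kvm_vfio_spapr_tce naming a VFIO group fd and a TCE
	 * table fd (e.g. one created with KVM_CREATE_SPAPR_TCE), and the table
	 * is attached to the group's IOMMU so KVM can service TCE hypercalls
	 * for that group in the kernel.  See the kvm-vfio device documentation
	 * for the authoritative description.
	 */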
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: {
		struct kvm_vfio_spapr_tce param;
		struct kvm_vfio *kv = dev->private;
		struct vfio_group *vfio_group;
		struct kvm_vfio_group *kvg;
		struct fd f;
		struct iommu_group *grp;

		if (copy_from_user(&param, (void __user *)arg,
				   sizeof(struct kvm_vfio_spapr_tce)))
			return -EFAULT;

		f = fdget(param.groupfd);
		if (!f.file)
			return -EBADF;

		vfio_group = kvm_vfio_group_get_external_user(f.file);
		fdput(f);

		if (IS_ERR(vfio_group))
			return PTR_ERR(vfio_group);

		grp = kvm_vfio_group_get_iommu_group(vfio_group);
		if (WARN_ON_ONCE(!grp)) {
			kvm_vfio_group_put_external_user(vfio_group);
			return -EIO;
		}

		ret = -ENOENT;

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (kvg->vfio_group != vfio_group)
				continue;

			ret = kvm_spapr_tce_attach_iommu_group(dev->kvm,
							       param.tablefd,
							       grp);
			break;
		}

		mutex_unlock(&kv->lock);

		iommu_group_put(grp);
		kvm_vfio_group_put_external_user(vfio_group);

		return ret;
	}
#endif /* CONFIG_SPAPR_TCE_IOMMU */
	}

	return -ENXIO;
}

static int kvm_vfio_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		return kvm_vfio_set_group(dev, attr->attr, attr->addr);
	}

	return -ENXIO;
}

static int kvm_vfio_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		switch (attr->attr) {
		case KVM_DEV_VFIO_GROUP_ADD:
		case KVM_DEV_VFIO_GROUP_DEL:
#ifdef CONFIG_SPAPR_TCE_IOMMU
		case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
#endif
			return 0;
		}

		break;
	}

	return -ENXIO;
}

static void kvm_vfio_destroy(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg, *tmp;

	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
#ifdef CONFIG_SPAPR_TCE_IOMMU
		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg->vfio_group);
#endif
		kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
		kvm_vfio_group_put_external_user(kvg->vfio_group);
		list_del(&kvg->node);
		kfree(kvg);
		kvm_arch_end_assignment(dev->kvm);
	}

	kvm_vfio_update_coherency(dev);

	kfree(kv);
	kfree(dev);	/* alloc by kvm_ioctl_create_device, free by .destroy */
}

static int kvm_vfio_create(struct kvm_device *dev, u32 type);

static struct kvm_device_ops kvm_vfio_ops = {
	.name = "kvm-vfio",
	.create = kvm_vfio_create,
	.destroy = kvm_vfio_destroy,
	.set_attr = kvm_vfio_set_attr,
	.has_attr = kvm_vfio_has_attr,
};

static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
	struct kvm_device *tmp;
	struct kvm_vfio *kv;

	/* Only one VFIO "device" per VM */
	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
		if (tmp->ops == &kvm_vfio_ops)
			return -EBUSY;

	kv = kzalloc(sizeof(*kv), GFP_KERNEL);
	if (!kv)
		return -ENOMEM;

	INIT_LIST_HEAD(&kv->group_list);
	mutex_init(&kv->lock);

	dev->private = kv;

	return 0;
}

int kvm_vfio_ops_init(void)
{
	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
}

void kvm_vfio_ops_exit(void)
{
	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
}
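
/*
 * Usage sketch (descriptive only, not part of this file): userspace wires a
 * VFIO group into a VM through the kvm-vfio pseudo device roughly as below.
 * Variable names and the group number are illustrative.
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);		// cd.fd is the device fd
 *
 *	int32_t group_fd = open("/dev/vfio/26", O_RDWR);
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_DEV_VFIO_GROUP,
 *		.attr	= KVM_DEV_VFIO_GROUP_ADD,
 *		.addr	= (__u64)(unsigned long)&group_fd,
 *	};
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * KVM_DEV_VFIO_GROUP_DEL works the same way with the same payload.
 */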