/*
 * VFIO device
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include "hw/vfio/vfio-device.h"
#include "hw/vfio/pci.h"
#include "hw/hw.h"
#include "trace.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/units.h"
#include "migration/cpr.h"
#include "migration/blocker.h"
#include "monitor/monitor.h"
#include "vfio-helpers.h"

/* Global list of all VFIO devices, linked via VFIODevice::global_next. */
VFIODeviceList vfio_device_list =
    QLIST_HEAD_INITIALIZER(vfio_device_list);

/*
 * We want to differentiate hot reset of multiple in-use devices vs
 * hot reset of a single in-use device. VFIO_DEVICE_RESET will already
 * handle the case of doing hot resets when there is only a single
 * device per bus. The in-use here refers to how many VFIODevices are
 * affected. A hot reset that affects multiple devices, but only a
 * single in-use device, means that we can call it from our bus
 * ->reset() callback since the extent is effectively a single
 * device. This allows us to make use of it in the hotplug path. When
 * there are multiple in-use devices, we can only trigger the hot
 * reset during a system reset and thus from our reset handler. We
 * separate _one vs _multi here so that we don't overlap and do a
 * double reset on the system reset path where both our reset handler
 * and ->reset() callback are used. Calling _one() will only do a hot
 * reset for the one in-use devices case, calling _multi() will do
 * nothing if a _one() would have been sufficient.
 */
void vfio_device_reset_handler(void *opaque)
{
    VFIODevice *vbasedev;

    trace_vfio_device_reset_handler();
    /* First pass: let each realized device decide whether it needs a reset */
    QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
        if (vbasedev->dev->realized) {
            vbasedev->ops->vfio_compute_needs_reset(vbasedev);
        }
    }

    /* Second pass: perform the multi-device hot reset where flagged */
    QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
        if (vbasedev->dev->realized && vbasedev->needs_reset) {
            vbasedev->ops->vfio_hot_reset_multi(vbasedev);
        }
    }
}

/*
 * Common VFIO interrupt disable
 *
 * DATA_NONE/ACTION_TRIGGER with count = 0 disables all interrupts for
 * the given IRQ index (per the VFIO_DEVICE_SET_IRQS uAPI).
 */
void vfio_device_irq_disable(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    vbasedev->io_ops->set_irqs(vbasedev, &irq_set);
}

/* Unmask a single (sub)interrupt: subindex 0 of the given IRQ index. */
void vfio_device_irq_unmask(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    vbasedev->io_ops->set_irqs(vbasedev, &irq_set);
}

/* Mask a single (sub)interrupt: subindex 0 of the given IRQ index. */
void vfio_device_irq_mask(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    vbasedev->io_ops->set_irqs(vbasedev, &irq_set);
}

/* Human-readable name for a VFIO_IRQ_SET_ACTION_* value (error messages). */
static inline const char *action_to_str(int action)
{
    switch (action) {
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
    }
}

/*
 * Human-readable name for an IRQ index, PCI devices only.
 * Returns NULL for non-PCI devices or unknown indexes so callers can
 * fall back to printing the numeric index.
 */
static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
    default:
        return NULL;
    }
}

/*
 * Attach or detach an eventfd to a single interrupt.
 *
 * @index/@subindex: which interrupt within the device.
 * @action: VFIO_IRQ_SET_ACTION_{MASK,UNMASK,TRIGGER}.
 * @fd: eventfd to wire up, or -1 to tear the signaling down.
 *
 * Returns true on success; on failure sets @errp with a message that
 * includes the symbolic index/action names where available.
 */
bool vfio_device_irq_set_signaling(VFIODevice *vbasedev, int index, int subindex,
                                   int action, int fd, Error **errp)
{
    ERRP_GUARD();
    g_autofree struct vfio_irq_set *irq_set = NULL;
    int argsz;
    const char *name;
    int32_t *pfd;

    /* Variable-sized ioctl argument: header plus one eventfd slot */
    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
    irq_set->index = index;
    irq_set->start = subindex;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;

    if (!vbasedev->io_ops->set_irqs(vbasedev, irq_set)) {
        return true;
    }

    error_setg_errno(errp, errno, "VFIO_DEVICE_SET_IRQS failure");

    /* Build a descriptive prefix: "<name>-<sub>:" or "index <n>-<sub>:" */
    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
    return false;
}

/*
 * Query interrupt information for @index into caller-provided @info.
 * Returns the io_ops result (0 on success, negative errno on failure
 * for the ioctl backend).
 */
int vfio_device_get_irq_info(VFIODevice *vbasedev, int index,
                             struct vfio_irq_info *info)
{
    memset(info, 0, sizeof(*info));

    info->argsz = sizeof(*info);
    info->index = index;

    return vbasedev->io_ops->get_irq_info(vbasedev, info);
}

/*
 * Get (and cache) region info for @index.
 *
 * On success *@info points at a cached allocation owned by the device;
 * callers must NOT free it — it is released in vfio_device_unprepare().
 * The kernel may report a larger argsz than we allocated (capability
 * chains), in which case we reallocate and retry.
 */
int vfio_device_get_region_info(VFIODevice *vbasedev, int index,
                                struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);
    int fd = -1;
    int ret;

    /* check cache */
    if (vbasedev->reginfo[index] != NULL) {
        *info = vbasedev->reginfo[index];
        return 0;
    }

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    ret = vbasedev->io_ops->get_region_info(vbasedev, *info, &fd);
    if (ret != 0) {
        g_free(*info);
        *info = NULL;
        return ret;
    }

    if ((*info)->argsz > argsz) {
        /* Kernel wants a bigger buffer — grow and retry */
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        /* Drop any fd returned by the aborted attempt before retrying */
        if (fd != -1) {
            close(fd);
            fd = -1;
        }

        goto retry;
    }

    /* fill cache */
    vbasedev->reginfo[index] = *info;
    if (vbasedev->region_fds != NULL) {
        vbasedev->region_fds[index] = fd;
    }

    return 0;
}

/*
 * fd to use for accessing region @index: a per-region fd when the
 * backend provides one, otherwise the device fd itself.
 */
int vfio_device_get_region_fd(VFIODevice *vbasedev, int index)
{
    return vbasedev->region_fds ?
           vbasedev->region_fds[index] :
           vbasedev->fd;
}

/*
 * Find the first region carrying a VFIO_REGION_INFO_CAP_TYPE capability
 * matching @type/@subtype. On success *@info points at the cached info
 * (not owned by the caller) and 0 is returned; -ENODEV otherwise.
 */
int vfio_device_get_region_info_type(VFIODevice *vbasedev, uint32_t type,
                                     uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_device_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_device_get_region_info_type(vbasedev->name, i,
                                               cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }
    }

    *info = NULL;
    return -ENODEV;
}

/* True if @region advertises capability @cap_type in its info chain. */
bool vfio_device_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    struct vfio_region_info *info = NULL;
    bool ret = false;

    /* info is cached by vfio_device_get_region_info(); do not free */
    if (!vfio_device_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
    }

    return ret;
}

/*
 * Establish vbasedev->name.
 *
 * Sysfs path case (fd < 0): validate the path exists and default the
 * name to its basename. FD-passing case: requires the iommufd backend;
 * prefers the QOM device id, otherwise synthesizes "VFIO_FD<n>" and
 * installs a CPR_TRANSFER migration blocker since the fd number is not
 * stable across processes.
 *
 * Returns true on success, false with @errp set otherwise.
 */
bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp)
{
    ERRP_GUARD();
    struct stat st;

    if (vbasedev->fd < 0) {
        if (stat(vbasedev->sysfsdev, &st) < 0) {
            error_setg_errno(errp, errno, "no such host device");
            error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev);
            return false;
        }
        /* User may specify a name, e.g: VFIO platform device */
        if (!vbasedev->name) {
            vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
        }
    } else {
        if (!vbasedev->iommufd) {
            error_setg(errp, "Use FD passing only with iommufd backend");
            return false;
        }
        if (!vbasedev->name) {

            if (vbasedev->dev->id) {
                vbasedev->name = g_strdup(vbasedev->dev->id);
                return true;
            } else {
                /*
                 * Assign a name so any function printing it will not break.
                 * The fd number changes across processes, so this cannot be
                 * used as an invariant name for CPR.
                 */
                vbasedev->name = g_strdup_printf("VFIO_FD%d", vbasedev->fd);
                error_setg(&vbasedev->cpr.id_blocker,
                           "vfio device with fd=%d needs an id property",
                           vbasedev->fd);
                return migrate_add_blocker_modes(&vbasedev->cpr.id_blocker,
                                                 errp, MIG_MODE_CPR_TRANSFER,
                                                 -1) == 0;
            }
        }
    }

    return true;
}

/* Release the name and any CPR id blocker installed by get_name(). */
void vfio_device_free_name(VFIODevice *vbasedev)
{
    g_clear_pointer(&vbasedev->name, g_free);
    migrate_del_blocker(&vbasedev->cpr.id_blocker);
}

/*
 * Resolve the "fd" property string to a file descriptor, possibly a
 * CPR-preserved one (see cpr_get_fd_param()).
 */
void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp)
{
    vbasedev->fd = cpr_get_fd_param(vbasedev->dev->id, str, 0, errp);
}

static VFIODeviceIOOps vfio_device_io_ops_ioctl;

/*
 * Common VFIODevice field initialization; io_ops defaults to the
 * traditional ioctl() backend defined at the bottom of this file.
 */
void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops,
                      DeviceState *dev, bool ram_discard)
{
    vbasedev->type = type;
    vbasedev->ops = ops;
    vbasedev->io_ops = &vfio_device_io_ops_ioctl;
    vbasedev->dev = dev;
    vbasedev->fd = -1;
    vbasedev->use_region_fds = false;

    vbasedev->ram_block_discard_allowed = ram_discard;
}

/* IOVA address width (in bits) usable for this device's container. */
int vfio_device_get_aw_bits(VFIODevice *vdev)
{
    /*
     * iova_ranges is a sorted list. For old kernels that support
     * VFIO but not support query of iova ranges, iova_ranges is NULL,
     * in this case HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX(64) is returned.
     */
    GList *l = g_list_last(vdev->bcontainer->iova_ranges);

    if (l) {
        Range *range = l->data;
        return range_get_last_bit(range) + 1;
    }

    return HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX;
}

/*
 * True if the device's sysfs subsystem link resolves to the mdev bus.
 * NOTE(review): realpath(..., NULL) returns a malloc'd string that is
 * freed with g_free via g_autofree — fine where glib uses the system
 * allocator, worth confirming for exotic builds.
 */
bool vfio_device_is_mdev(VFIODevice *vbasedev)
{
    g_autofree char *subsys = NULL;
    g_autofree char *tmp = NULL;

    if (!vbasedev->sysfsdev) {
        return false;
    }

    tmp = g_strdup_printf("%s/subsystem", vbasedev->sysfsdev);
    subsys = realpath(tmp, NULL);
    return subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
}

/*
 * Create and realize a HostIOMMUDevice of @typename for this device.
 * Skipped (reports success) for mdev devices. On failure the object is
 * unreferenced and false returned with @errp set.
 */
bool vfio_device_hiod_create_and_realize(VFIODevice *vbasedev,
                                         const char *typename, Error **errp)
{
    HostIOMMUDevice *hiod;

    if (vbasedev->mdev) {
        return true;
    }

    hiod = HOST_IOMMU_DEVICE(object_new(typename));

    if (!HOST_IOMMU_DEVICE_GET_CLASS(hiod)->realize(hiod, vbasedev, errp)) {
        object_unref(hiod);
        return false;
    }

    vbasedev->hiod = hiod;
    return true;
}

/*
 * Extract the VFIODevice from a QOM object, or NULL if @obj is not a
 * VFIO PCI device. Only the PCI type is handled here.
 */
VFIODevice *vfio_get_vfio_device(Object *obj)
{
    if (object_dynamic_cast(obj, TYPE_VFIO_PCI)) {
        return &VFIO_PCI_BASE(obj)->vbasedev;
    } else {
        return NULL;
    }
}

/*
 * Attach @vbasedev to @as using the VFIOIOMMUClass registered under
 * @iommu_type. The class must exist (asserted).
 */
bool vfio_device_attach_by_iommu_type(const char *iommu_type, char *name,
                                      VFIODevice *vbasedev, AddressSpace *as,
                                      Error **errp)
{
    const VFIOIOMMUClass *ops =
        VFIO_IOMMU_CLASS(object_class_by_name(iommu_type));

    assert(ops);

    return ops->attach_device(name, vbasedev, as, errp);
}

/* Attach, selecting the iommufd or legacy backend per device config. */
bool vfio_device_attach(char *name, VFIODevice *vbasedev,
                        AddressSpace *as, Error **errp)
{
    const char *iommu_type = vbasedev->iommufd ?
                             TYPE_VFIO_IOMMU_IOMMUFD :
                             TYPE_VFIO_IOMMU_LEGACY;

    return vfio_device_attach_by_iommu_type(iommu_type, name, vbasedev,
                                            as, errp);
}

/* Detach from the container; no-op if never attached. */
void vfio_device_detach(VFIODevice *vbasedev)
{
    if (!vbasedev->bcontainer) {
        return;
    }
    VFIO_IOMMU_GET_CLASS(vbasedev->bcontainer)->detach_device(vbasedev);
}

/*
 * Populate device fields from the kernel's vfio_device_info, link the
 * device into the container and global lists, and allocate the region
 * info/fd caches. Paired with vfio_device_unprepare().
 */
void vfio_device_prepare(VFIODevice *vbasedev, VFIOContainerBase *bcontainer,
                         struct vfio_device_info *info)
{
    vbasedev->num_irqs = info->num_irqs;
    vbasedev->num_regions = info->num_regions;
    vbasedev->flags = info->flags;
    vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);

    vbasedev->bcontainer = bcontainer;
    QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);

    QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);

    vbasedev->reginfo = g_new0(struct vfio_region_info *,
                               vbasedev->num_regions);
    if (vbasedev->use_region_fds) {
        vbasedev->region_fds = g_new0(int, vbasedev->num_regions);
    }
}

/*
 * Undo vfio_device_prepare(): free cached region info, close any
 * per-region fds, and unlink from both lists.
 */
void vfio_device_unprepare(VFIODevice *vbasedev)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        g_free(vbasedev->reginfo[i]);
        if (vbasedev->region_fds != NULL && vbasedev->region_fds[i] != -1) {
            close(vbasedev->region_fds[i]);
        }

    }

    g_clear_pointer(&vbasedev->reginfo, g_free);
    g_clear_pointer(&vbasedev->region_fds, g_free);

    QLIST_REMOVE(vbasedev, container_next);
    QLIST_REMOVE(vbasedev, global_next);
    vbasedev->bcontainer = NULL;
}

/*
 * Traditional ioctl() based io
 *
 * All backends below follow the same convention: forward to the ioctl
 * and return the ioctl result, converted to -errno on failure.
 */

static int vfio_device_io_device_feature(VFIODevice *vbasedev,
                                         struct vfio_device_feature *feature)
{
    int ret;

    ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);

    return ret < 0 ? -errno : ret;
}

static int vfio_device_io_get_region_info(VFIODevice *vbasedev,
                                          struct vfio_region_info *info,
                                          int *fd)
{
    int ret;

    /* The ioctl backend never hands out a separate per-region fd */
    *fd = -1;

    ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, info);

    return ret < 0 ? -errno : ret;
}

static int vfio_device_io_get_irq_info(VFIODevice *vbasedev,
                                       struct vfio_irq_info *info)
{
    int ret;

    ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, info);

    return ret < 0 ? -errno : ret;
}

static int vfio_device_io_set_irqs(VFIODevice *vbasedev,
                                   struct vfio_irq_set *irqs)
{
    int ret;

    ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irqs);

    return ret < 0 ? -errno : ret;
}

/* Read @size bytes at @off within region @index via pread on the device fd */
static int vfio_device_io_region_read(VFIODevice *vbasedev, uint8_t index,
                                      off_t off, uint32_t size, void *data)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_device_get_region_info(vbasedev, index, &info);
    if (ret != 0) {
        return ret;
    }

    ret = pread(vbasedev->fd, data, size, info->offset + off);

    return ret < 0 ? -errno : ret;
}

/*
 * Write @size bytes at @off within region @index. @post is accepted for
 * interface compatibility but unused here — ioctl/pwrite writes are
 * always synchronous.
 */
static int vfio_device_io_region_write(VFIODevice *vbasedev, uint8_t index,
                                       off_t off, uint32_t size, void *data,
                                       bool post)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_device_get_region_info(vbasedev, index, &info);
    if (ret != 0) {
        return ret;
    }

    ret = pwrite(vbasedev->fd, data, size, info->offset + off);

    return ret < 0 ? -errno : ret;
}

static VFIODeviceIOOps vfio_device_io_ops_ioctl = {
    .device_feature = vfio_device_io_device_feature,
    .get_region_info = vfio_device_io_get_region_info,
    .get_irq_info = vfio_device_io_get_irq_info,
    .set_irqs = vfio_device_io_set_irqs,
    .region_read = vfio_device_io_region_read,
    .region_write = vfio_device_io_region_write,
};