// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <linux/iova.h>

#include "vdpa_sim.h"

#define DRV_VERSION "0.1"
#define DRV_AUTHOR "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC "vDPA Device Simulator core"
#define DRV_LICENSE "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 - Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries. 0 means unlimited. (default: 2048)");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_sim(vdpa);
}

static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}

static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false,
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)
			  (uintptr_t)vq->driver_addr,
			  (struct vring_used *)
			  (uintptr_t)vq->device_addr);

	vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}

static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
	int i;

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

static int dir_to_perm(enum dma_data_direction dir)
{
	int perm = -EFAULT;

	switch (dir) {
	case DMA_FROM_DEVICE:
		perm = VHOST_MAP_WO;
		break;
	case DMA_TO_DEVICE:
		perm = VHOST_MAP_RO;
		break;
	case DMA_BIDIRECTIONAL:
		perm = VHOST_MAP_RW;
		break;
	default:
		break;
	}

	return perm;
}

static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
				    size_t size, unsigned int perm)
{
	struct iova *iova;
	dma_addr_t dma_addr;
	int ret;

	/* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
	iova = alloc_iova(&vdpasim->iova, size >> iova_shift(&vdpasim->iova),
			  ULONG_MAX - 1, true);
	if (!iova)
		return DMA_MAPPING_ERROR;

	dma_addr = iova_dma_addr(&vdpasim->iova, iova);

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(vdpasim->iommu, (u64)dma_addr,
				    (u64)dma_addr + size - 1, (u64)paddr, perm);
	spin_unlock(&vdpasim->iommu_lock);

	if (ret) {
		__free_iova(&vdpasim->iova, iova);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
				size_t size)
{
	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(vdpasim->iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr));
}

static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	phys_addr_t paddr = page_to_phys(page) + offset;
	int perm = dir_to_perm(dir);

	if (perm < 0)
		return DMA_MAPPING_ERROR;

	return vdpasim_map_range(vdpasim, paddr, size, perm);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);

	vdpasim_unmap_range(vdpasim, dma_addr, size);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_addr, gfp_t flag,
				    unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	phys_addr_t paddr;
	void *addr;

	addr = kmalloc(size, flag);
	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
		return NULL;
	}

	paddr = virt_to_phys(addr);

	*dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW);
	if (*dma_addr == DMA_MAPPING_ERROR) {
		kfree(addr);
		return NULL;
	}

	return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_addr,
				  unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);

	vdpasim_unmap_range(vdpasim, dma_addr, size);

	kfree(vaddr);
}

static const struct dma_map_ops vdpasim_dma_ops = {
	.map_page = vdpasim_map_page,
	.unmap_page = vdpasim_unmap_page,
	.alloc = vdpasim_alloc_coherent,
	.free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
{
	const struct vdpa_config_ops *ops;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
				    dev_attr->name, false);
	if (IS_ERR(vdpasim)) {
		ret = PTR_ERR(vdpasim);
		goto err_alloc;
	}

	vdpasim->dev_attr = *dev_attr;
	INIT_WORK(&vdpasim->work, dev_attr->work_fn);
	spin_lock_init(&vdpasim->lock);
	spin_lock_init(&vdpasim->iommu_lock);

	dev = &vdpasim->vdpa.dev;
	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
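	/*
	 * Install the simulator's dma_map_ops so that DMA on this device is
	 * translated through the software IOTLB and IOVA allocator set up
	 * below rather than through a real IOMMU.
	 */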
	set_dma_ops(dev, &vdpasim_dma_ops);
	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = vhost_iotlb_alloc(max_iotlb_entries, 0);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu,
				 &vdpasim->iommu_lock);

	ret = iova_cache_get();
	if (ret)
		goto err_iommu;

	/* For simplicity we use an IOVA allocator with byte granularity */
	init_iova_domain(&vdpasim->iova, 1, 0);

	vdpasim->vdpa.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vq->ready)
		schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	spin_lock(&vdpasim->lock);
	vq->ready = ready;
	if (vq->ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state->split.avail_index;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->split.avail_index = vrh->last_avail_idx;
	return 0;
}
static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}

static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}

static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->features;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	spin_unlock(&vdpasim->lock);
}

static int vdpasim_reset(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = 0;
	vdpasim_do_reset(vdpasim);
	spin_unlock(&vdpasim->lock);

	return 0;
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

static int vdpasim_set_map(struct vdpa_device *vdpa,
			   struct vhost_iotlb *iotlb)
{
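	/*
	 * Batched mapping update: rebuild the simulator IOTLB from the
	 * caller-provided iotlb under iommu_lock, falling back to an empty
	 * IOTLB if any entry fails to insert.
	 */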
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
			   u64 pa, u32 perm, void *opaque)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range_ctx(vdpasim->iommu, iova, iova + size - 1,
					pa, perm, opaque);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	cancel_work_sync(&vdpasim->work);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
		vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
	}

	if (vdpa_get_dma_dev(vdpa)) {
		put_iova_domain(&vdpasim->iova);
		iova_cache_put();
	}

	kvfree(vdpasim->buffer);
	if (vdpasim->iommu)
		vhost_iotlb_free(vdpasim->iommu);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}

static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_device_features = vdpasim_get_device_features,
	.set_driver_features = vdpasim_set_driver_features,
	.get_driver_features = vdpasim_get_driver_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.reset = vdpasim_reset,
	.get_config_size = vdpasim_get_config_size,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.dma_map = vdpasim_dma_map,
	.dma_unmap = vdpasim_dma_unmap,
	.free = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_device_features = vdpasim_get_device_features,
	.set_driver_features = vdpasim_set_driver_features,
	.get_driver_features = vdpasim_get_driver_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.reset = vdpasim_reset,
	.get_config_size = vdpasim_get_config_size,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.set_map = vdpasim_set_map,
	.free = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);