// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA networking device simulator.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uuid.h>
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/sysfs.h>
#include <linux/file.h>
#include <linux/etherdevice.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/virtio_byteorder.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_net.h>

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 - Enable; 0 - Disable");

static char *macaddr;
module_param(macaddr, charp, 0);
MODULE_PARM_DESC(macaddr, "Ethernet MAC address");

struct vdpasim_virtqueue {
	struct vringh vring;
	struct vringh_kiov iov;
	unsigned short head;
	bool ready;
	u64 desc_addr;
	u64 device_addr;
	u64 driver_addr;
	u32 num;
	void *private;
	irqreturn_t (*cb)(void *data);
};

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_DEVICE_ID 0x1
#define VDPASIM_VENDOR_ID 0
#define VDPASIM_VQ_NUM 0x2
#define VDPASIM_NAME "vdpasim-netdev"

static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
			      (1ULL << VIRTIO_F_VERSION_1)  |
			      (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
			      (1ULL << VIRTIO_NET_F_MAC);

/* State of each vdpasim device */
struct vdpasim {
	struct vdpa_device vdpa;
	struct vdpasim_virtqueue vqs[VDPASIM_VQ_NUM];
	struct work_struct work;
	/* spinlock to synchronize virtqueue state */
	spinlock_t lock;
	struct virtio_net_config config;
	struct vhost_iotlb *iommu;
	void *buffer;
	u32 status;
	u32 generation;
	u64 features;
	/* spinlock to synchronize iommu table */
	spinlock_t iommu_lock;
};

/* TODO: cross-endian support */
static inline bool vdpasim_is_little_endian(struct vdpasim *vdpasim)
{
	return virtio_legacy_is_little_endian() ||
		(vdpasim->features & (1ULL << VIRTIO_F_VERSION_1));
}

static inline u16 vdpasim16_to_cpu(struct vdpasim *vdpasim, __virtio16 val)
{
	return __virtio16_to_cpu(vdpasim_is_little_endian(vdpasim), val);
}

static inline __virtio16 cpu_to_vdpasim16(struct vdpasim *vdpasim, u16 val)
{
	return __cpu_to_virtio16(vdpasim_is_little_endian(vdpasim), val);
}

static struct vdpasim *vdpasim_dev;

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_sim(vdpa);
}

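/*
 * Hook up vringh to a virtqueue once the driver marks it ready: the
 * descriptor, driver (available) and device (used) ring addresses
 * programmed through .set_vq_address are handed to vringh, which
 * translates all ring accesses through the simulator's software IOTLB.
 */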
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vringh_init_iotlb(&vq->vring, vdpasim_features,
			  VDPASIM_QUEUE_MAX, false,
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)
			  (uintptr_t)vq->driver_addr,
			  (struct vring_used *)
			  (uintptr_t)vq->device_addr);
}

static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim_features, VDPASIM_QUEUE_MAX,
			  false, NULL, NULL, NULL);
}

static void vdpasim_reset(struct vdpasim *vdpasim)
{
	int i;

	for (i = 0; i < VDPASIM_VQ_NUM; i++)
		vdpasim_vq_reset(&vdpasim->vqs[i]);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

static void vdpasim_work(struct work_struct *work)
{
	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
	struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
	struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
	ssize_t read, write;
	size_t total_write;
	int pkts = 0;
	int err;

	spin_lock(&vdpasim->lock);

	if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
		goto out;

	if (!txq->ready || !rxq->ready)
		goto out;

	while (true) {
		total_write = 0;
		err = vringh_getdesc_iotlb(&txq->vring, &txq->iov, NULL,
					   &txq->head, GFP_ATOMIC);
		if (err <= 0)
			break;

		err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->iov,
					   &rxq->head, GFP_ATOMIC);
		if (err <= 0) {
			vringh_complete_iotlb(&txq->vring, txq->head, 0);
			break;
		}

		while (true) {
			read = vringh_iov_pull_iotlb(&txq->vring, &txq->iov,
						     vdpasim->buffer,
						     PAGE_SIZE);
			if (read <= 0)
				break;

			write = vringh_iov_push_iotlb(&rxq->vring, &rxq->iov,
						      vdpasim->buffer, read);
			if (write <= 0)
				break;

			total_write += write;
		}

		/* Make sure data is written before advancing the index */
		smp_wmb();

		vringh_complete_iotlb(&txq->vring, txq->head, 0);
		vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);

		/* Make sure used is visible before raising the interrupt. */
		smp_wmb();

		local_bh_disable();
		if (txq->cb)
			txq->cb(txq->private);
		if (rxq->cb)
			rxq->cb(rxq->private);
		local_bh_enable();

		if (++pkts > 4) {
			schedule_work(&vdpasim->work);
			goto out;
		}
	}

out:
	spin_unlock(&vdpasim->lock);
}

static int dir_to_perm(enum dma_data_direction dir)
{
	int perm = -EFAULT;

	switch (dir) {
	case DMA_FROM_DEVICE:
		perm = VHOST_MAP_WO;
		break;
	case DMA_TO_DEVICE:
		perm = VHOST_MAP_RO;
		break;
	case DMA_BIDIRECTIONAL:
		perm = VHOST_MAP_RW;
		break;
	default:
		break;
	}

	return perm;
}

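/*
 * Software dma_map_ops installed on the vdpa device: instead of doing real
 * DMA, each map/alloc records an identity (PA == IOVA) entry in the
 * simulator's vhost IOTLB so that vringh can later translate ring and
 * buffer addresses through the same table.
 */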
static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
	int ret, perm = dir_to_perm(dir);

	if (perm < 0)
		return DMA_MAPPING_ERROR;

	/* For simplicity, use identity mapping to avoid e.g. an IOVA
	 * allocator.
	 */
	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
				    pa, perm);
	spin_unlock(&vdpasim->iommu_lock);
	if (ret)
		return DMA_MAPPING_ERROR;

	return (dma_addr_t)(pa);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_addr, gfp_t flag,
				    unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	void *addr = kmalloc(size, flag);
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
	} else {
		u64 pa = virt_to_phys(addr);

		ret = vhost_iotlb_add_range(iommu, (u64)pa,
					    (u64)pa + size - 1,
					    pa, VHOST_MAP_RW);
		if (ret) {
			*dma_addr = DMA_MAPPING_ERROR;
			kfree(addr);
			addr = NULL;
		} else {
			*dma_addr = (dma_addr_t)pa;
		}
	}
	spin_unlock(&vdpasim->iommu_lock);

	return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_addr,
				  unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	kfree(phys_to_virt((uintptr_t)dma_addr));
}

static const struct dma_map_ops vdpasim_dma_ops = {
	.map_page = vdpasim_map_page,
	.unmap_page = vdpasim_unmap_page,
	.alloc = vdpasim_alloc_coherent,
	.free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_net_config_ops;
static const struct vdpa_config_ops vdpasim_net_batch_config_ops;

static struct vdpasim *vdpasim_create(void)
{
	const struct vdpa_config_ops *ops;
	struct vdpasim *vdpasim;
	struct device *dev;
	int ret = -ENOMEM;

	if (batch_mapping)
		ops = &vdpasim_net_batch_config_ops;
	else
		ops = &vdpasim_net_config_ops;

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
				    VDPASIM_VQ_NUM);
	if (!vdpasim)
		goto err_alloc;

	INIT_WORK(&vdpasim->work, vdpasim_work);
	spin_lock_init(&vdpasim->lock);
	spin_lock_init(&vdpasim->iommu_lock);

	dev = &vdpasim->vdpa.dev;
	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	set_dma_ops(dev, &vdpasim_dma_ops);

	vdpasim->iommu = vhost_iotlb_alloc(2048, 0);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	if (macaddr) {
		mac_pton(macaddr, vdpasim->config.mac);
		if (!is_valid_ether_addr(vdpasim->config.mac)) {
			ret = -EADDRNOTAVAIL;
			goto err_iommu;
		}
	} else {
		eth_random_addr(vdpasim->config.mac);
	}

	vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu);
	vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);

	vdpasim->vdpa.dma_dev = dev;
	ret = vdpa_register_device(&vdpasim->vdpa);
	if (ret)
		goto err_iommu;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}

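/*
 * vdpa_config_ops implementation. A vDPA bus driver (e.g. virtio_vdpa or
 * vhost_vdpa) drives the simulated device through these callbacks:
 * programming virtqueue addresses and state, negotiating features and
 * accessing the virtio-net config space.
 */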
static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vq->ready)
		schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	spin_lock(&vdpasim->lock);
	vq->ready = ready;
	if (vq->ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state->avail_index;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->avail_index = vrh->last_avail_idx;
	return 0;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u64 vdpasim_get_features(struct vdpa_device *vdpa)
{
	return vdpasim_features;
}

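/*
 * Feature negotiation: VIRTIO_F_ACCESS_PLATFORM is mandatory so that the
 * driver sets up DMA mappings and every ring/buffer address is translated
 * through the simulator's IOTLB rather than used as a raw physical address.
 */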
static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct virtio_net_config *config = &vdpasim->config;

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim_features;

	/* We generally only know whether the guest is using the legacy
	 * interface here, so generally that's the earliest we can set
	 * config fields.
	 * Note: We actually require VIRTIO_F_ACCESS_PLATFORM above which
	 * implies VIRTIO_F_VERSION_1, but let's not try to be clever here.
	 */

	config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
	config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
	return 0;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupts */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	return VDPASIM_DEVICE_ID;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	if (status == 0)
		vdpasim_reset(vdpasim);
	spin_unlock(&vdpasim->lock);
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len <= sizeof(struct virtio_net_config))
		memcpy(buf, (u8 *)&vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	/* No writable config supported by vdpasim */
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

static int vdpasim_set_map(struct vdpa_device *vdpa,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
			   u64 pa, u32 perm)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
				    perm);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	cancel_work_sync(&vdpasim->work);
	kfree(vdpasim->buffer);
	if (vdpasim->iommu)
		vhost_iotlb_free(vdpasim->iommu);
}

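/*
 * Two flavours of the same device: the "batch" variant exposes .set_map so
 * the whole IOTLB is replaced in one call, while the default variant exposes
 * incremental .dma_map/.dma_unmap updates. The flavour is selected by the
 * batch_mapping module parameter at device creation time.
 */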
static const struct vdpa_config_ops vdpasim_net_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_features = vdpasim_get_features,
	.set_features = vdpasim_set_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.dma_map = vdpasim_dma_map,
	.dma_unmap = vdpasim_dma_unmap,
	.free = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_net_batch_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_features = vdpasim_get_features,
	.set_features = vdpasim_set_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.set_map = vdpasim_set_map,
	.free = vdpasim_free,
};

static int __init vdpasim_dev_init(void)
{
	vdpasim_dev = vdpasim_create();

	if (!IS_ERR(vdpasim_dev))
		return 0;

	return PTR_ERR(vdpasim_dev);
}

static void __exit vdpasim_dev_exit(void)
{
	struct vdpa_device *vdpa = &vdpasim_dev->vdpa;

	vdpa_unregister_device(vdpa);
}

module_init(vdpasim_dev_init)
module_exit(vdpasim_dev_exit)

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
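
/*
 * Usage sketch (informational, not part of the driver itself): assuming this
 * file is built as the vdpa_sim module, loading it creates one simulated vDPA
 * net device whose MAC address and mapping mode are chosen at load time, e.g.:
 *
 *   modprobe vdpa_sim macaddr=02:12:34:56:78:9a batch_mapping=1
 *
 * The device can then be bound to a vDPA bus driver such as virtio_vdpa
 * (in-kernel virtio-net) or vhost_vdpa (exposed to a userspace VMM).
 */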